hbase git commit: HBASE-16781 Fix flaky TestMasterProcedureWalLease

2016-10-07 Thread mbertozzi
Repository: hbase
Updated Branches:
  refs/heads/master c7cae6be3 -> 29d701a31


HBASE-16781 Fix flaky TestMasterProcedureWalLease


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/29d701a3
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/29d701a3
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/29d701a3

Branch: refs/heads/master
Commit: 29d701a314b6bf56771a217b42c4c10832b15753
Parents: c7cae6b
Author: Matteo Bertozzi 
Authored: Fri Oct 7 17:32:19 2016 -0700
Committer: Matteo Bertozzi 
Committed: Fri Oct 7 18:01:53 2016 -0700

--
 .../procedure2/store/wal/WALProcedureStore.java | 41 +---
 .../hadoop/hbase/master/MasterServices.java |  5 +++
 .../master/procedure/MasterProcedureEnv.java|  7 +++-
 .../hbase/master/MockNoopMasterServices.java|  5 +++
 .../MasterProcedureTestingUtility.java  |  1 +
 5 files changed, 43 insertions(+), 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/29d701a3/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java
--
diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java
index 36cf7af..1e60402 100644
--- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java
+++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java
@@ -122,6 +122,7 @@ public class WALProcedureStore extends ProcedureStoreBase {
   private final AtomicBoolean inSync = new AtomicBoolean(false);
   private final AtomicLong totalSynced = new AtomicLong(0);
   private final AtomicLong lastRollTs = new AtomicLong(0);
+  private final AtomicLong syncId = new AtomicLong(0);
 
  private LinkedTransferQueue<ByteSlot> slotsCache = null;
  private Set<ProcedureWALFile> corruptedLogs = null;
@@ -226,15 +227,15 @@ public class WALProcedureStore extends ProcedureStoreBase {
   }
 
   @Override
-  public void stop(boolean abort) {
+  public void stop(final boolean abort) {
 if (!setRunning(false)) {
   return;
 }
 
-LOG.info("Stopping the WAL Procedure Store");
+LOG.info("Stopping the WAL Procedure Store, isAbort=" + abort +
+  (isSyncAborted() ? " (self aborting)" : ""));
 sendStopSignal();
-
-if (!abort) {
+if (!isSyncAborted()) {
   try {
 while (syncThread.isAlive()) {
   sendStopSignal();
@@ -525,6 +526,7 @@ public class WALProcedureStore extends ProcedureStoreBase {
 }
   }
 
+  final long pushSyncId = syncId.get();
   updateStoreTracker(type, procId, subProcIds);
   slots[slotIndex++] = slot;
   logId = flushLogId;
@@ -540,7 +542,9 @@ public class WALProcedureStore extends ProcedureStoreBase {
 slotCond.signal();
   }
 
-  syncCond.await();
+  while (pushSyncId == syncId.get() && isRunning()) {
+syncCond.await();
+  }
 } catch (InterruptedException e) {
   Thread.currentThread().interrupt();
   sendAbortProcessSignal();
@@ -642,13 +646,15 @@ public class WALProcedureStore extends ProcedureStoreBase {
   totalSyncedToStore = totalSynced.addAndGet(slotSize);
   slotIndex = 0;
   inSync.set(false);
+  syncId.incrementAndGet();
 } catch (InterruptedException e) {
   Thread.currentThread().interrupt();
-  sendAbortProcessSignal();
   syncException.compareAndSet(null, e);
+  sendAbortProcessSignal();
   throw e;
 } catch (Throwable t) {
   syncException.compareAndSet(null, t);
+  sendAbortProcessSignal();
   throw t;
 } finally {
   syncCond.signalAll();
@@ -679,13 +685,12 @@ public class WALProcedureStore extends ProcedureStoreBase {
   } catch (Throwable e) {
 LOG.warn("unable to sync slots, retry=" + retry);
 if (++retry >= maxRetriesBeforeRoll) {
-  if (logRolled >= maxSyncFailureRoll) {
+  if (logRolled >= maxSyncFailureRoll && isRunning()) {
 LOG.error("Sync slots after log roll failed, abort.", e);
-sendAbortProcessSignal();
 throw e;
   }
 
-  if (!rollWriterOrDie()) {
+  if (!rollWriterWithRetries()) {
 throw e;
   }
 
@@ -720,8 +725,8 @@ public class WALProcedureStore extends ProcedureStoreBase {
 return totalSynced;
   }
 
-  private boolean rollWriterOrDie() {
-for (int i = 0; i < rollRetries; ++i) {
+  private boolean rollWriterWithRetries() {
+for (int i = 0; i < 
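
For readers skimming the truncated diff above: the heart of the fix is that a pushing thread no longer trusts a single syncCond.await(). It records the value of the new syncId counter before parking and keeps waiting until the sync thread has advanced that counter (or the store stops), so spurious wakeups and missed signals can no longer be mistaken for a completed sync. The snippet below is a minimal, self-contained sketch of that generation-counter pattern, not HBase code; the class and method names (GenerationAwait, awaitSync, syncCompleted) are invented for illustration.

import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.ReentrantLock;

public class GenerationAwait {
  private final ReentrantLock lock = new ReentrantLock();
  private final Condition syncCond = lock.newCondition();
  private final AtomicLong syncId = new AtomicLong(0);
  private volatile boolean running = true;

  // Writer side: block until a sync that started after this call has completed.
  public void awaitSync() throws InterruptedException {
    lock.lock();
    try {
      final long pushSyncId = syncId.get();      // remember the generation we enqueued under
      while (pushSyncId == syncId.get() && running) {
        syncCond.await();                        // loop guards against spurious wakeups
      }
    } finally {
      lock.unlock();
    }
  }

  // Sync-thread side: called after a batch of slots has been flushed.
  public void syncCompleted() {
    lock.lock();
    try {
      syncId.incrementAndGet();                  // advance the generation first...
      syncCond.signalAll();                      // ...then wake every waiter
    } finally {
      lock.unlock();
    }
  }

  // Shutdown: waiters also re-check running, so they are released here.
  public void stop() {
    lock.lock();
    try {
      running = false;
      syncCond.signalAll();
    } finally {
      lock.unlock();
    }
  }
}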

hbase git commit: 1). Fix resource leak issue upon exception during mob compaction. 2). Reorg the code in compactMobFilesInBatch() to make it more readable.

2016-10-07 Thread jmhsieh
Repository: hbase
Updated Branches:
  refs/heads/master 723d56153 -> c7cae6be3


1). Fix resource leak issue upon exception during mob compaction. 2). Reorg the code in compactMobFilesInBatch() to make it more readable.

Signed-off-by: Jonathan M Hsieh 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/c7cae6be
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/c7cae6be
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/c7cae6be

Branch: refs/heads/master
Commit: c7cae6be3dccfaa63033b705ea9845f3f088aab6
Parents: 723d561
Author: Huaxiang Sun 
Authored: Fri Oct 7 15:47:06 2016 -0700
Committer: Jonathan M Hsieh 
Committed: Fri Oct 7 17:49:27 2016 -0700

--
 .../compactions/PartitionedMobCompactor.java| 157 +++
 .../TestPartitionedMobCompactor.java|  90 ++-
 2 files changed, 178 insertions(+), 69 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/c7cae6be/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/PartitionedMobCompactor.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/PartitionedMobCompactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/PartitionedMobCompactor.java
index 29b7e8a..33aecc0 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/PartitionedMobCompactor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/PartitionedMobCompactor.java
@@ -229,8 +229,8 @@ public class PartitionedMobCompactor extends MobCompactor {
 }
 // archive the del files if all the mob files are selected.
 if (request.type == CompactionType.ALL_FILES && !newDelPaths.isEmpty()) {
-  LOG.info("After a mob compaction with all files selected, archiving the 
del files "
-+ newDelPaths);
+  LOG.info(
+  "After a mob compaction with all files selected, archiving the del 
files " + newDelPaths);
   try {
 MobUtils.removeMobFiles(conf, fs, tableName, mobTableDir, 
column.getName(), newDelFiles);
   } catch (IOException e) {
@@ -381,7 +381,7 @@ public class PartitionedMobCompactor extends MobCompactor {
   List filesToCompact, int 
batch,
   Path bulkloadPathOfPartition, Path 
bulkloadColumnPath,
   List newFiles)
-throws IOException {
+  throws IOException {
 // open scanner to the selected mob files and del files.
 StoreScanner scanner = createScanner(filesToCompact, 
ScanType.COMPACT_DROP_DELETES);
 // the mob files to be compacted, not include the del files.
@@ -392,62 +392,92 @@ public class PartitionedMobCompactor extends MobCompactor {
 StoreFileWriter writer = null;
 StoreFileWriter refFileWriter = null;
 Path filePath = null;
-Path refFilePath = null;
 long mobCells = 0;
+boolean cleanupTmpMobFile = false;
+boolean cleanupBulkloadDirOfPartition = false;
+boolean cleanupCommittedMobFile = false;
+boolean closeReaders= true;
+
 try {
-  writer = MobUtils.createWriter(conf, fs, column, 
partition.getPartitionId().getDate(),
-tempPath, Long.MAX_VALUE, column.getCompactionCompressionType(), 
partition.getPartitionId()
-  .getStartKey(), compactionCacheConfig, cryptoContext);
-  filePath = writer.getPath();
-  byte[] fileName = Bytes.toBytes(filePath.getName());
-  // create a temp file and open a writer for it in the bulkloadPath
-  refFileWriter = MobUtils.createRefFileWriter(conf, fs, column, 
bulkloadColumnPath, fileInfo
-.getSecond().longValue(), compactionCacheConfig, cryptoContext);
-  refFilePath = refFileWriter.getPath();
-  List cells = new ArrayList<>();
-  boolean hasMore;
-  ScannerContext scannerContext =
-  
ScannerContext.newBuilder().setBatchLimit(compactionKVMax).build();
-  do {
-hasMore = scanner.next(cells, scannerContext);
-for (Cell cell : cells) {
-  // write the mob cell to the mob file.
-  writer.append(cell);
-  // write the new reference cell to the store file.
-  KeyValue reference = MobUtils.createMobRefKeyValue(cell, fileName, 
tableNameTag);
-  refFileWriter.append(reference);
-  mobCells++;
+  try {
+writer = MobUtils
+.createWriter(conf, fs, column, 
partition.getPartitionId().getDate(), tempPath,
+Long.MAX_VALUE, column.getCompactionCompressionType(),
+partition.getPartitionId().getStartKey(), 
compactionCacheConfig, cryptoContext);
+
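
The (truncated) rewrite above threads a handful of cleanup flags (cleanupTmpMobFile, cleanupBulkloadDirOfPartition, cleanupCommittedMobFile, closeReaders) through nested try blocks, so that an exception at any step releases exactly the artifacts that were created up to that point instead of leaking writers and temp files. Below is a hedged, stand-alone sketch of that flag-based cleanup idiom using plain java.nio; every name in it is hypothetical and it is not the PartitionedMobCompactor code.

import java.io.BufferedWriter;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;

public final class BatchCompactionSketch {
  public static void compactBatch(Path tmpDir) throws IOException {
    Path tmpMobFile = null;
    BufferedWriter writer = null;
    boolean cleanupTmpMobFile = false;     // mirrors the cleanupTmpMobFile flag in the patch
    try {
      tmpMobFile = Files.createTempFile(tmpDir, "mob", ".tmp");
      cleanupTmpMobFile = true;            // from here on, a failure must remove the temp file

      writer = Files.newBufferedWriter(tmpMobFile);
      writer.write("compacted cells would be written here");
      writer.close();
      writer = null;                       // writer closed cleanly, nothing left to close

      // commit step: once the file is committed/bulk-loaded, the temp copy needs no cleanup
      cleanupTmpMobFile = false;
    } finally {
      if (writer != null) {
        writer.close();                    // always release the writer, even on failure
      }
      if (cleanupTmpMobFile && tmpMobFile != null) {
        Files.deleteIfExists(tmpMobFile);  // undo only the step that actually happened
      }
    }
  }
}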

[1/2] hbase git commit: HBASE-16657 Expose per-region last major compaction time in RegionServer UI

2016-10-07 Thread garyh
Repository: hbase
Updated Branches:
  refs/heads/branch-1.3 a52188f97 -> 0704aed44


HBASE-16657 Expose per-region last major compaction time in RegionServer UI

Signed-off-by: Gary Helmling 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/b32e7ced
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/b32e7ced
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/b32e7ced

Branch: refs/heads/branch-1.3
Commit: b32e7ced261e7bb9164ed2094407dd084e0715f9
Parents: a52188f
Author: Dustin Pho 
Authored: Sat Sep 24 17:53:55 2016 -0700
Committer: Gary Helmling 
Committed: Fri Oct 7 14:20:19 2016 -0700

--
 .../hadoop/hbase/tmpl/regionserver/RegionListTmpl.jamon | 9 +
 1 file changed, 9 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/b32e7ced/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RegionListTmpl.jamon
--
diff --git a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RegionListTmpl.jamon b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RegionListTmpl.jamon
index b058245..4c35cf4 100644
--- a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RegionListTmpl.jamon
+++ b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RegionListTmpl.jamon
@@ -22,6 +22,7 @@
 
 <%import>
 java.util.*;
+org.apache.commons.lang.time.FastDateFormat;
 org.apache.hadoop.hbase.regionserver.HRegionServer;
 org.apache.hadoop.hbase.util.Bytes;
 org.apache.hadoop.hbase.HRegionInfo;
@@ -191,6 +192,7 @@
 Num. Compacting KVs
 Num. Compacted KVs
 Compaction Progress
+Last Major Compaction
 
 
 <%for HRegionInfo r: onlineRegions %>
@@ -203,6 +205,12 @@
 percentDone = String.format("%.2f", 100 *
 ((float) load.getCurrentCompactedKVs() / 
load.getTotalCompactingKVs())) + "%";
 }
+long lastMajorCompactionTs = load.getLastMajorCompactionTs();
+String compactTime = "";
+if (lastMajorCompactionTs > 0) {
+  FastDateFormat fdf = FastDateFormat.getInstance("yyyy-MM-dd HH:mm ZZ");
+  compactTime = fdf.format(lastMajorCompactionTs);
+}
 
 
   <% r.getRegionNameAsString() %>
@@ -211,6 +219,7 @@
 <% load.getTotalCompactingKVs() %>
 <% load.getCurrentCompactedKVs() %>
 <% percentDone %>
+<% compactTime %>
 
 
 



[2/2] hbase git commit: HBASE-16657 addendum handle null RegionLoad

2016-10-07 Thread garyh
HBASE-16657 addendum handle null RegionLoad


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/0704aed4
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/0704aed4
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/0704aed4

Branch: refs/heads/branch-1.3
Commit: 0704aed44222afe2f730f1681c7893ba6e2e9a8a
Parents: b32e7ce
Author: Gary Helmling 
Authored: Fri Oct 7 12:01:46 2016 -0700
Committer: Gary Helmling 
Committed: Fri Oct 7 14:20:33 2016 -0700

--
 .../hbase/tmpl/regionserver/RegionListTmpl.jamon   | 17 +
 1 file changed, 9 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/0704aed4/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RegionListTmpl.jamon
--
diff --git a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RegionListTmpl.jamon b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RegionListTmpl.jamon
index 4c35cf4..c472115 100644
--- a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RegionListTmpl.jamon
+++ b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RegionListTmpl.jamon
@@ -201,15 +201,16 @@
 <%java>
 RegionLoad load = 
regionServer.createRegionLoad(r.getEncodedName());
 String percentDone = "";
-if  (load != null && load.getTotalCompactingKVs() > 0) {
-percentDone = String.format("%.2f", 100 *
-((float) load.getCurrentCompactedKVs() / 
load.getTotalCompactingKVs())) + "%";
-}
-long lastMajorCompactionTs = load.getLastMajorCompactionTs();
 String compactTime = "";
-if (lastMajorCompactionTs > 0) {
-  FastDateFormat fdf = FastDateFormat.getInstance("yyyy-MM-dd HH:mm ZZ");
-  compactTime = fdf.format(lastMajorCompactionTs);
+if  (load != null) {
+  if (load.getTotalCompactingKVs() > 0) {
+percentDone = String.format("%.2f", 100 *
+((float) load.getCurrentCompactedKVs() / 
load.getTotalCompactingKVs())) + "%";
+  }
+  if (load.getLastMajorCompactionTs() > 0) {
+FastDateFormat fdf = FastDateFormat.getInstance("yyyy-MM-dd HH:mm ZZ");
+compactTime = fdf.format(load.getLastMajorCompactionTs());
+  }
 }
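
Taken together, the commit and its addendum add a "Last Major Compaction" column that renders RegionLoad.getLastMajorCompactionTs() with commons-lang's FastDateFormat, and they guard every RegionLoad access with a load != null check because createRegionLoad() may return null. The stand-alone snippet below illustrates just the formatting rule, assuming commons-lang is on the classpath as the template's new import implies; the class name CompactionTimeCell is invented for this example.

import org.apache.commons.lang.time.FastDateFormat;

public class CompactionTimeCell {
  // A timestamp of 0 means the region has never been major-compacted, so the cell stays blank.
  public static String render(long lastMajorCompactionTs) {
    String compactTime = "";
    if (lastMajorCompactionTs > 0) {
      FastDateFormat fdf = FastDateFormat.getInstance("yyyy-MM-dd HH:mm ZZ");
      compactTime = fdf.format(lastMajorCompactionTs);
    }
    return compactTime;
  }

  public static void main(String[] args) {
    System.out.println(render(System.currentTimeMillis())); // e.g. "2016-10-07 17:32 -07:00"
    System.out.println("[" + render(0L) + "]");             // prints "[]" for "never"
  }
}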
 
 



[1/2] hbase git commit: HBASE-16657 Expose per-region last major compaction time in RegionServer UI

2016-10-07 Thread garyh
Repository: hbase
Updated Branches:
  refs/heads/branch-1 7092dc3ec -> bbaa0e851


HBASE-16657 Expose per-region last major compaction time in RegionServer UI

Signed-off-by: Gary Helmling 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/fa98c506
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/fa98c506
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/fa98c506

Branch: refs/heads/branch-1
Commit: fa98c506d022adc5c6315ab20051617199844e53
Parents: 7092dc3
Author: Dustin Pho 
Authored: Sat Sep 24 17:53:55 2016 -0700
Committer: Gary Helmling 
Committed: Fri Oct 7 14:04:22 2016 -0700

--
 .../hadoop/hbase/tmpl/regionserver/RegionListTmpl.jamon | 9 +
 1 file changed, 9 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/fa98c506/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RegionListTmpl.jamon
--
diff --git a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RegionListTmpl.jamon b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RegionListTmpl.jamon
index b058245..4c35cf4 100644
--- a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RegionListTmpl.jamon
+++ b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RegionListTmpl.jamon
@@ -22,6 +22,7 @@
 
 <%import>
 java.util.*;
+org.apache.commons.lang.time.FastDateFormat;
 org.apache.hadoop.hbase.regionserver.HRegionServer;
 org.apache.hadoop.hbase.util.Bytes;
 org.apache.hadoop.hbase.HRegionInfo;
@@ -191,6 +192,7 @@
 Num. Compacting KVs
 Num. Compacted KVs
 Compaction Progress
+Last Major Compaction
 
 
 <%for HRegionInfo r: onlineRegions %>
@@ -203,6 +205,12 @@
 percentDone = String.format("%.2f", 100 *
 ((float) load.getCurrentCompactedKVs() / 
load.getTotalCompactingKVs())) + "%";
 }
+long lastMajorCompactionTs = load.getLastMajorCompactionTs();
+String compactTime = "";
+if (lastMajorCompactionTs > 0) {
+  FastDateFormat fdf = FastDateFormat.getInstance("yyyy-MM-dd HH:mm ZZ");
+  compactTime = fdf.format(lastMajorCompactionTs);
+}
 
 
   <% r.getRegionNameAsString() %>
@@ -211,6 +219,7 @@
 <% load.getTotalCompactingKVs() %>
 <% load.getCurrentCompactedKVs() %>
 <% percentDone %>
+<% compactTime %>
 
 
 



[2/2] hbase git commit: HBASE-16657 addendum handle null RegionLoad

2016-10-07 Thread garyh
HBASE-16657 addendum handle null RegionLoad


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/bbaa0e85
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/bbaa0e85
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/bbaa0e85

Branch: refs/heads/branch-1
Commit: bbaa0e851db083a793378c314ef27467ec795800
Parents: fa98c50
Author: Gary Helmling 
Authored: Fri Oct 7 12:01:46 2016 -0700
Committer: Gary Helmling 
Committed: Fri Oct 7 14:05:30 2016 -0700

--
 .../hbase/tmpl/regionserver/RegionListTmpl.jamon   | 17 +
 1 file changed, 9 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/bbaa0e85/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RegionListTmpl.jamon
--
diff --git a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RegionListTmpl.jamon b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RegionListTmpl.jamon
index 4c35cf4..c472115 100644
--- a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RegionListTmpl.jamon
+++ b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RegionListTmpl.jamon
@@ -201,15 +201,16 @@
 <%java>
 RegionLoad load = 
regionServer.createRegionLoad(r.getEncodedName());
 String percentDone = "";
-if  (load != null && load.getTotalCompactingKVs() > 0) {
-percentDone = String.format("%.2f", 100 *
-((float) load.getCurrentCompactedKVs() / 
load.getTotalCompactingKVs())) + "%";
-}
-long lastMajorCompactionTs = load.getLastMajorCompactionTs();
 String compactTime = "";
-if (lastMajorCompactionTs > 0) {
-  FastDateFormat fdf = FastDateFormat.getInstance("yyyy-MM-dd HH:mm ZZ");
-  compactTime = fdf.format(lastMajorCompactionTs);
+if  (load != null) {
+  if (load.getTotalCompactingKVs() > 0) {
+percentDone = String.format("%.2f", 100 *
+((float) load.getCurrentCompactedKVs() / 
load.getTotalCompactingKVs())) + "%";
+  }
+  if (load.getLastMajorCompactionTs() > 0) {
+FastDateFormat fdf = FastDateFormat.getInstance("yyyy-MM-dd HH:mm ZZ");
+compactTime = fdf.format(load.getLastMajorCompactionTs());
+  }
 }
 
 



hbase git commit: HBASE-16657 addendum handle null RegionLoad

2016-10-07 Thread garyh
Repository: hbase
Updated Branches:
  refs/heads/master bc9a97245 -> 723d56153


HBASE-16657 addendum handle null RegionLoad


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/723d5615
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/723d5615
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/723d5615

Branch: refs/heads/master
Commit: 723d56153f908421dc5abcea8471bacf995cb6d3
Parents: bc9a972
Author: Gary Helmling 
Authored: Fri Oct 7 12:01:46 2016 -0700
Committer: Gary Helmling 
Committed: Fri Oct 7 13:29:19 2016 -0700

--
 .../hbase/tmpl/regionserver/RegionListTmpl.jamon | 19 ++-
 1 file changed, 10 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/723d5615/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RegionListTmpl.jamon
--
diff --git a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RegionListTmpl.jamon b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RegionListTmpl.jamon
index b393137..96c3a96 100644
--- a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RegionListTmpl.jamon
+++ b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RegionListTmpl.jamon
@@ -207,18 +207,19 @@
 <%java>
 RegionLoad load = 
regionServer.createRegionLoad(r.getEncodedName());
 String percentDone = "";
-if  (load != null && load.getTotalCompactingKVs() > 0) {
-percentDone = String.format("%.2f", 100 *
-((float) load.getCurrentCompactedKVs() / 
load.getTotalCompactingKVs())) + "%";
+String compactTime = "";
+if  (load != null) {
+  if (load.getTotalCompactingKVs() > 0) {
+percentDone = String.format("%.2f", 100 *
+((float) load.getCurrentCompactedKVs() / 
load.getTotalCompactingKVs())) + "%";
+  }
+  if (load.getLastMajorCompactionTs() > 0) {
+FastDateFormat fdf = FastDateFormat.getInstance("yyyy-MM-dd HH:mm ZZ");
+compactTime = fdf.format(load.getLastMajorCompactionTs());
+  }
 }
 String displayName = HRegionInfo.getRegionNameAsStringForDisplay(r,
   regionServer.getConfiguration());
-long lastMajorCompactionTs = load.getLastMajorCompactionTs();
-String compactTime = "";
-if (lastMajorCompactionTs > 0) {
-  FastDateFormat fdf = FastDateFormat.getInstance("yyyy-MM-dd HH:mm ZZ");
-  compactTime = fdf.format(lastMajorCompactionTs);
-}
 
 <% 
displayName %>
 <%if load != null %>



[57/77] [abbrv] hbase git commit: HBASE-16750 hbase compilation failed on power system - revert due to failure in mvn site

2016-10-07 Thread syuanjiang
HBASE-16750 hbase compilation failed on power system - revert due to failure in mvn site


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/34ad9652
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/34ad9652
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/34ad9652

Branch: refs/heads/hbase-12439
Commit: 34ad9652ae1b15722db0e75a447283eba614f24c
Parents: b30b6df
Author: tedyu 
Authored: Tue Oct 4 09:24:47 2016 -0700
Committer: tedyu 
Committed: Tue Oct 4 09:24:47 2016 -0700

--
 pom.xml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/34ad9652/pom.xml
--
diff --git a/pom.xml b/pom.xml
index a796abb..2d341c0 100644
--- a/pom.xml
+++ b/pom.xml
@@ -1009,7 +1009,7 @@
   
 org.asciidoctor
 asciidoctorj-pdf
-1.5.0-alpha.11
+1.5.0-alpha.6
   
 
 
@@ -1233,7 +1233,7 @@
 1.3.9-1
 6.18
 2.10.3
-1.5.3
+1.5.2.1
 
 /usr
 /etc/hbase



[34/77] [abbrv] [partial] hbase git commit: HBASE-15638 Shade protobuf Which includes

2016-10-07 Thread syuanjiang
http://git-wip-us.apache.org/repos/asf/hbase/blob/95c1dc93/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/coprocessor/protobuf/generated/IncrementCounterProcessorTestProtos.java
--
diff --git a/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/coprocessor/protobuf/generated/IncrementCounterProcessorTestProtos.java b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/coprocessor/protobuf/generated/IncrementCounterProcessorTestProtos.java
new file mode 100644
index 0000000..7ba5b8e
--- /dev/null
+++ b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/coprocessor/protobuf/generated/IncrementCounterProcessorTestProtos.java
@@ -0,0 +1,4059 @@
+// Generated by the protocol buffer compiler.  DO NOT EDIT!
+// source: IncrementCounterProcessor.proto
+
+package org.apache.hadoop.hbase.coprocessor.protobuf.generated;
+
+public final class IncrementCounterProcessorTestProtos {
+  private IncrementCounterProcessorTestProtos() {}
+  public static void registerAllExtensions(
+  com.google.protobuf.ExtensionRegistry registry) {
+  }
+  public interface IncCounterProcessorRequestOrBuilder
+  extends com.google.protobuf.MessageOrBuilder {
+
+// required bytes row = 1;
+/**
+ * required bytes row = 1;
+ */
+boolean hasRow();
+/**
+ * required bytes row = 1;
+ */
+com.google.protobuf.ByteString getRow();
+
+// required int32 counter = 2;
+/**
+ * required int32 counter = 2;
+ */
+boolean hasCounter();
+/**
+ * required int32 counter = 2;
+ */
+int getCounter();
+  }
+  /**
+   * Protobuf type {@code IncCounterProcessorRequest}
+   */
+  public static final class IncCounterProcessorRequest extends
+  com.google.protobuf.GeneratedMessage
+  implements IncCounterProcessorRequestOrBuilder {
+// Use IncCounterProcessorRequest.newBuilder() to construct.
+private 
IncCounterProcessorRequest(com.google.protobuf.GeneratedMessage.Builder 
builder) {
+  super(builder);
+  this.unknownFields = builder.getUnknownFields();
+}
+private IncCounterProcessorRequest(boolean noInit) { this.unknownFields = 
com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+private static final IncCounterProcessorRequest defaultInstance;
+public static IncCounterProcessorRequest getDefaultInstance() {
+  return defaultInstance;
+}
+
+public IncCounterProcessorRequest getDefaultInstanceForType() {
+  return defaultInstance;
+}
+
+private final com.google.protobuf.UnknownFieldSet unknownFields;
+@java.lang.Override
+public final com.google.protobuf.UnknownFieldSet
+getUnknownFields() {
+  return this.unknownFields;
+}
+private IncCounterProcessorRequest(
+com.google.protobuf.CodedInputStream input,
+com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+throws com.google.protobuf.InvalidProtocolBufferException {
+  initFields();
+  int mutable_bitField0_ = 0;
+  com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+  com.google.protobuf.UnknownFieldSet.newBuilder();
+  try {
+boolean done = false;
+while (!done) {
+  int tag = input.readTag();
+  switch (tag) {
+case 0:
+  done = true;
+  break;
+default: {
+  if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+done = true;
+  }
+  break;
+}
+case 10: {
+  bitField0_ |= 0x0001;
+  row_ = input.readBytes();
+  break;
+}
+case 16: {
+  bitField0_ |= 0x0002;
+  counter_ = input.readInt32();
+  break;
+}
+  }
+}
+  } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+throw e.setUnfinishedMessage(this);
+  } catch (java.io.IOException e) {
+throw new com.google.protobuf.InvalidProtocolBufferException(
+e.getMessage()).setUnfinishedMessage(this);
+  } finally {
+this.unknownFields = unknownFields.build();
+makeExtensionsImmutable();
+  }
+}
+public static final com.google.protobuf.Descriptors.Descriptor
+getDescriptor() {
+  return 
org.apache.hadoop.hbase.coprocessor.protobuf.generated.IncrementCounterProcessorTestProtos.internal_static_IncCounterProcessorRequest_descriptor;
+}
+
+protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+internalGetFieldAccessorTable() {
+  return 
org.apache.hadoop.hbase.coprocessor.protobuf.generated.IncrementCounterProcessorTestProtos.internal_static_IncCounterProcessorRequest_fieldAccessorTable
+  .ensureFieldAccessorsInitialized(
+  

[12/77] [abbrv] [partial] hbase git commit: HBASE-15638 Shade protobuf Which includes

2016-10-07 Thread syuanjiang
http://git-wip-us.apache.org/repos/asf/hbase/blob/95c1dc93/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/GeneratedMessageLite.java
--
diff --git a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/GeneratedMessageLite.java b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/GeneratedMessageLite.java
new file mode 100644
index 0000000..057100b
--- /dev/null
+++ b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/GeneratedMessageLite.java
@@ -0,0 +1,2280 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package org.apache.hadoop.hbase.shaded.com.google.protobuf;
+
+import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractMessageLite.Builder.LimitedInputStream;
+import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageLite.EqualsVisitor.NotEqualsException;
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.BooleanList;
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.DoubleList;
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.FloatList;
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.IntList;
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.LongList;
+import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.ProtobufList;
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.WireFormat.FieldType;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.ObjectStreamException;
+import java.io.Serializable;
+import java.lang.reflect.InvocationTargetException;
+import java.lang.reflect.Method;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Lite version of {@link GeneratedMessage}.
+ *
+ * @author ken...@google.com Kenton Varda
+ */
+public abstract class GeneratedMessageLite<
+MessageType extends GeneratedMessageLite,
+BuilderType extends GeneratedMessageLite.Builder>
+extends AbstractMessageLite {
+
+  /** For use by generated code only. Lazily initialized to reduce 
allocations. */
+  protected UnknownFieldSetLite unknownFields = 
UnknownFieldSetLite.getDefaultInstance();
+
+  /** For use by generated code only.  */
+  protected int memoizedSerializedSize = -1;
+
+  @Override
+  @SuppressWarnings("unchecked") // Guaranteed by runtime.
+  public final Parser getParserForType() {
+return (Parser) dynamicMethod(MethodToInvoke.GET_PARSER);
+  }
+
+  @Override
+  @SuppressWarnings("unchecked") // Guaranteed by runtime.
+  public final MessageType getDefaultInstanceForType() {
+return (MessageType) dynamicMethod(MethodToInvoke.GET_DEFAULT_INSTANCE);
+  }
+
+  @Override
+  @SuppressWarnings("unchecked") // Guaranteed by runtime.
+  public final BuilderType newBuilderForType() {
+return (BuilderType) dynamicMethod(MethodToInvoke.NEW_BUILDER);
+  }
+
+  /**
+   * A reflective toString function. This is primarily intended as a developer 
aid, while keeping
+   * binary size down. The first line of the {@code toString()} 

[38/77] [abbrv] [partial] hbase git commit: HBASE-15638 Shade protobuf Which includes

2016-10-07 Thread syuanjiang
http://git-wip-us.apache.org/repos/asf/hbase/blob/95c1dc93/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/coprocessor/protobuf/generated/ColumnAggregationProtos.java
--
diff --git a/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/coprocessor/protobuf/generated/ColumnAggregationProtos.java b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/coprocessor/protobuf/generated/ColumnAggregationProtos.java
new file mode 100644
index 0000000..61b47ff
--- /dev/null
+++ b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/coprocessor/protobuf/generated/ColumnAggregationProtos.java
@@ -0,0 +1,1277 @@
+// Generated by the protocol buffer compiler.  DO NOT EDIT!
+// source: ColumnAggregationProtocol.proto
+
+package org.apache.hadoop.hbase.coprocessor.protobuf.generated;
+
+public final class ColumnAggregationProtos {
+  private ColumnAggregationProtos() {}
+  public static void registerAllExtensions(
+  com.google.protobuf.ExtensionRegistry registry) {
+  }
+  public interface SumRequestOrBuilder
+  extends com.google.protobuf.MessageOrBuilder {
+
+// required bytes family = 1;
+/**
+ * required bytes family = 1;
+ */
+boolean hasFamily();
+/**
+ * required bytes family = 1;
+ */
+com.google.protobuf.ByteString getFamily();
+
+// optional bytes qualifier = 2;
+/**
+ * optional bytes qualifier = 2;
+ */
+boolean hasQualifier();
+/**
+ * optional bytes qualifier = 2;
+ */
+com.google.protobuf.ByteString getQualifier();
+  }
+  /**
+   * Protobuf type {@code SumRequest}
+   */
+  public static final class SumRequest extends
+  com.google.protobuf.GeneratedMessage
+  implements SumRequestOrBuilder {
+// Use SumRequest.newBuilder() to construct.
+private SumRequest(com.google.protobuf.GeneratedMessage.Builder 
builder) {
+  super(builder);
+  this.unknownFields = builder.getUnknownFields();
+}
+private SumRequest(boolean noInit) { this.unknownFields = 
com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+private static final SumRequest defaultInstance;
+public static SumRequest getDefaultInstance() {
+  return defaultInstance;
+}
+
+public SumRequest getDefaultInstanceForType() {
+  return defaultInstance;
+}
+
+private final com.google.protobuf.UnknownFieldSet unknownFields;
+@java.lang.Override
+public final com.google.protobuf.UnknownFieldSet
+getUnknownFields() {
+  return this.unknownFields;
+}
+private SumRequest(
+com.google.protobuf.CodedInputStream input,
+com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+throws com.google.protobuf.InvalidProtocolBufferException {
+  initFields();
+  int mutable_bitField0_ = 0;
+  com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+  com.google.protobuf.UnknownFieldSet.newBuilder();
+  try {
+boolean done = false;
+while (!done) {
+  int tag = input.readTag();
+  switch (tag) {
+case 0:
+  done = true;
+  break;
+default: {
+  if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+done = true;
+  }
+  break;
+}
+case 10: {
+  bitField0_ |= 0x0001;
+  family_ = input.readBytes();
+  break;
+}
+case 18: {
+  bitField0_ |= 0x0002;
+  qualifier_ = input.readBytes();
+  break;
+}
+  }
+}
+  } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+throw e.setUnfinishedMessage(this);
+  } catch (java.io.IOException e) {
+throw new com.google.protobuf.InvalidProtocolBufferException(
+e.getMessage()).setUnfinishedMessage(this);
+  } finally {
+this.unknownFields = unknownFields.build();
+makeExtensionsImmutable();
+  }
+}
+public static final com.google.protobuf.Descriptors.Descriptor
+getDescriptor() {
+  return 
org.apache.hadoop.hbase.coprocessor.protobuf.generated.ColumnAggregationProtos.internal_static_SumRequest_descriptor;
+}
+
+protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+internalGetFieldAccessorTable() {
+  return 
org.apache.hadoop.hbase.coprocessor.protobuf.generated.ColumnAggregationProtos.internal_static_SumRequest_fieldAccessorTable
+  .ensureFieldAccessorsInitialized(
+  
org.apache.hadoop.hbase.coprocessor.protobuf.generated.ColumnAggregationProtos.SumRequest.class,
 
org.apache.hadoop.hbase.coprocessor.protobuf.generated.ColumnAggregationProtos.SumRequest.Builder.class);
+}
+
+public static com.google.protobuf.Parser PARSER =
+new 

[33/77] [abbrv] [partial] hbase git commit: HBASE-15638 Shade protobuf Which includes

2016-10-07 Thread syuanjiang
http://git-wip-us.apache.org/repos/asf/hbase/blob/95c1dc93/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/protobuf/generated/AggregateProtos.java
--
diff --git a/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/protobuf/generated/AggregateProtos.java b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/protobuf/generated/AggregateProtos.java
new file mode 100644
index 0000000..508790c
--- /dev/null
+++ b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/protobuf/generated/AggregateProtos.java
@@ -0,0 +1,2375 @@
+// Generated by the protocol buffer compiler.  DO NOT EDIT!
+// source: Aggregate.proto
+
+package org.apache.hadoop.hbase.protobuf.generated;
+
+public final class AggregateProtos {
+  private AggregateProtos() {}
+  public static void registerAllExtensions(
+  com.google.protobuf.ExtensionRegistry registry) {
+  }
+  public interface AggregateRequestOrBuilder
+  extends com.google.protobuf.MessageOrBuilder {
+
+// required string interpreter_class_name = 1;
+/**
+ * required string interpreter_class_name = 1;
+ *
+ * 
+ ** The request passed to the AggregateService consists of three parts
+ *  (1) the (canonical) classname of the ColumnInterpreter implementation
+ *  (2) the Scan query
+ *  (3) any bytes required to construct the ColumnInterpreter object
+ *  properly
+ * 
+ */
+boolean hasInterpreterClassName();
+/**
+ * required string interpreter_class_name = 1;
+ *
+ * 
+ ** The request passed to the AggregateService consists of three parts
+ *  (1) the (canonical) classname of the ColumnInterpreter implementation
+ *  (2) the Scan query
+ *  (3) any bytes required to construct the ColumnInterpreter object
+ *  properly
+ * 
+ */
+java.lang.String getInterpreterClassName();
+/**
+ * required string interpreter_class_name = 1;
+ *
+ * 
+ ** The request passed to the AggregateService consists of three parts
+ *  (1) the (canonical) classname of the ColumnInterpreter implementation
+ *  (2) the Scan query
+ *  (3) any bytes required to construct the ColumnInterpreter object
+ *  properly
+ * 
+ */
+com.google.protobuf.ByteString
+getInterpreterClassNameBytes();
+
+// required .hbase.pb.Scan scan = 2;
+/**
+ * required .hbase.pb.Scan scan = 2;
+ */
+boolean hasScan();
+/**
+ * required .hbase.pb.Scan scan = 2;
+ */
+org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Scan getScan();
+/**
+ * required .hbase.pb.Scan scan = 2;
+ */
+org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanOrBuilder 
getScanOrBuilder();
+
+// optional bytes interpreter_specific_bytes = 3;
+/**
+ * optional bytes interpreter_specific_bytes = 3;
+ */
+boolean hasInterpreterSpecificBytes();
+/**
+ * optional bytes interpreter_specific_bytes = 3;
+ */
+com.google.protobuf.ByteString getInterpreterSpecificBytes();
+  }
+  /**
+   * Protobuf type {@code hbase.pb.AggregateRequest}
+   */
+  public static final class AggregateRequest extends
+  com.google.protobuf.GeneratedMessage
+  implements AggregateRequestOrBuilder {
+// Use AggregateRequest.newBuilder() to construct.
+private AggregateRequest(com.google.protobuf.GeneratedMessage.Builder 
builder) {
+  super(builder);
+  this.unknownFields = builder.getUnknownFields();
+}
+private AggregateRequest(boolean noInit) { this.unknownFields = 
com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+private static final AggregateRequest defaultInstance;
+public static AggregateRequest getDefaultInstance() {
+  return defaultInstance;
+}
+
+public AggregateRequest getDefaultInstanceForType() {
+  return defaultInstance;
+}
+
+private final com.google.protobuf.UnknownFieldSet unknownFields;
+@java.lang.Override
+public final com.google.protobuf.UnknownFieldSet
+getUnknownFields() {
+  return this.unknownFields;
+}
+private AggregateRequest(
+com.google.protobuf.CodedInputStream input,
+com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+throws com.google.protobuf.InvalidProtocolBufferException {
+  initFields();
+  int mutable_bitField0_ = 0;
+  com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+  com.google.protobuf.UnknownFieldSet.newBuilder();
+  try {
+boolean done = false;
+while (!done) {
+  int tag = input.readTag();
+  switch (tag) {
+case 0:
+  done = true;
+  break;
+default: {
+  if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+done = true;
+  }
+  break;
+  

[19/77] [abbrv] [partial] hbase git commit: HBASE-15638 Shade protobuf Which includes

2016-10-07 Thread syuanjiang
http://git-wip-us.apache.org/repos/asf/hbase/blob/95c1dc93/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/DoubleArrayList.java
--
diff --git a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/DoubleArrayList.java b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/DoubleArrayList.java
new file mode 100644
index 0000000..88effb2
--- /dev/null
+++ b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/DoubleArrayList.java
@@ -0,0 +1,273 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package org.apache.hadoop.hbase.shaded.com.google.protobuf;
+
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.DoubleList;
+
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.RandomAccess;
+
+/**
+ * An implementation of {@link DoubleList} on top of a primitive array.
+ *
+ * @author dw...@google.com (Daniel Weis)
+ */
+final class DoubleArrayList
+extends AbstractProtobufList
+implements DoubleList, RandomAccess {
+
+  private static final DoubleArrayList EMPTY_LIST = new DoubleArrayList();
+  static {
+EMPTY_LIST.makeImmutable();
+  }
+
+  public static DoubleArrayList emptyList() {
+return EMPTY_LIST;
+  }
+
+  /**
+   * The backing store for the list.
+   */
+  private double[] array;
+
+  /**
+   * The size of the list distinct from the length of the array. That is, it 
is the number of
+   * elements set in the list.
+   */
+  private int size;
+
+  /**
+   * Constructs a new mutable {@code DoubleArrayList} with default capacity.
+   */
+  DoubleArrayList() {
+this(new double[DEFAULT_CAPACITY], 0);
+  }
+
+  /**
+   * Constructs a new mutable {@code DoubleArrayList}
+   * containing the same elements as {@code other}.
+   */
+  private DoubleArrayList(double[] other, int size) {
+array = other;
+this.size = size;
+  }
+
+  @Override
+  public boolean equals(Object o) {
+if (this == o) {
+  return true;
+}
+if (!(o instanceof DoubleArrayList)) {
+  return super.equals(o);
+}
+DoubleArrayList other = (DoubleArrayList) o;
+if (size != other.size) {
+  return false;
+}
+
+final double[] arr = other.array;
+for (int i = 0; i < size; i++) {
+  if (array[i] != arr[i]) {
+return false;
+  }
+}
+
+return true;
+  }
+
+  @Override
+  public int hashCode() {
+int result = 1;
+for (int i = 0; i < size; i++) {
+  long bits = Double.doubleToLongBits(array[i]);
+  result = (31 * result) + Internal.hashLong(bits);
+}
+return result;
+  }
+
+  @Override
+  public DoubleList mutableCopyWithCapacity(int capacity) {
+if (capacity < size) {
+  throw new IllegalArgumentException();
+}
+return new DoubleArrayList(Arrays.copyOf(array, capacity), size);
+  }
+
+  @Override
+  public Double get(int index) {
+return getDouble(index);
+  }
+
+  @Override
+  public double getDouble(int index) {
+ensureIndexInRange(index);
+return array[index];
+  }
+
+  @Override
+  public int size() {
+return size;
+  }
+
+  @Override
+  public Double set(int index, Double element) {
+return setDouble(index, element);
+  }
+
+  

[29/77] [abbrv] [partial] hbase git commit: HBASE-15638 Shade protobuf Which includes

2016-10-07 Thread syuanjiang
http://git-wip-us.apache.org/repos/asf/hbase/blob/95c1dc93/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoadWithOldSecureEndpoint.java
--
diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoadWithOldSecureEndpoint.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoadWithOldSecureEndpoint.java
new file mode 100644
index 0000000..7ef9b9c
--- /dev/null
+++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoadWithOldSecureEndpoint.java
@@ -0,0 +1,179 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.atomic.AtomicLong;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.MultithreadedTestUtil.RepeatingTestThread;
+import org.apache.hadoop.hbase.MultithreadedTestUtil.TestContext;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.ClientServiceCallable;
+import org.apache.hadoop.hbase.client.ClusterConnection;
+import org.apache.hadoop.hbase.client.RpcRetryingCaller;
+import org.apache.hadoop.hbase.client.RpcRetryingCallerFactory;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
+import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
+import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.apache.hadoop.hbase.testclassification.RegionServerTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Pair;
+import org.junit.BeforeClass;
+import org.junit.experimental.categories.Category;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+
+import com.google.common.collect.Lists;
+
+/**
+ * Tests bulk loading of HFiles with old secure Endpoint client for backward 
compatibility. Will be
+ * removed when old non-secure client for backward compatibility is not 
supported.
+ */
+@RunWith(Parameterized.class)
+@Category({RegionServerTests.class, LargeTests.class})
+public class TestHRegionServerBulkLoadWithOldSecureEndpoint extends 
TestHRegionServerBulkLoad {
+  public TestHRegionServerBulkLoadWithOldSecureEndpoint(int duration) {
+super(duration);
+  }
+
+  private static final Log LOG =
+  LogFactory.getLog(TestHRegionServerBulkLoadWithOldSecureEndpoint.class);
+
+  @BeforeClass
+  public static void setUpBeforeClass() throws IOException {
+conf.setInt("hbase.rpc.timeout", 10 * 1000);
+conf.set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
+  "org.apache.hadoop.hbase.security.access.SecureBulkLoadEndpoint");
+  }
+
+  public static class AtomicHFileLoader extends RepeatingTestThread {
+final AtomicLong numBulkLoads = new AtomicLong();
+final AtomicLong numCompactions = new AtomicLong();
+private TableName tableName;
+
+public AtomicHFileLoader(TableName tableName, TestContext ctx,
+byte targetFamilies[][]) throws IOException {
+  super(ctx);
+  this.tableName = tableName;
+}
+
+public void doAnAction() throws Exception {
+  long iteration = numBulkLoads.getAndIncrement();
+  Path dir =  UTIL.getDataTestDirOnTestFS(String.format("bulkLoad_%08d",
+  iteration));
+
+  // create HFiles for different column families
+  FileSystem fs = UTIL.getTestFileSystem();
+  byte[] val = Bytes.toBytes(String.format("%010d", iteration));
+  final List> famPaths = new ArrayList>(
+  NUM_CFS);
+  for (int i = 0; i < NUM_CFS; i++) {
+Path hfile = new Path(dir, family(i));
+byte[] fam = 

[47/77] [abbrv] [partial] hbase git commit: HBASE-15638 Shade protobuf Which includes

2016-10-07 Thread syuanjiang
http://git-wip-us.apache.org/repos/asf/hbase/blob/95c1dc93/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcConnection.java
--
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcConnection.java
index 8a85580..ce5adda 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcConnection.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcConnection.java
@@ -22,7 +22,7 @@ import static org.apache.hadoop.hbase.ipc.CallEvent.Type.TIMEOUT;
 import static org.apache.hadoop.hbase.ipc.IPCUtil.setCancelled;
 import static org.apache.hadoop.hbase.ipc.IPCUtil.toIOE;
 
-import com.google.protobuf.RpcCallback;
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback;
 
 import io.netty.bootstrap.Bootstrap;
 import io.netty.buffer.ByteBuf;
@@ -52,7 +52,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.ipc.BufferCallBeforeInitHandler.BufferCallEvent;
 import org.apache.hadoop.hbase.ipc.HBaseRpcController.CancellationCallback;
-import org.apache.hadoop.hbase.protobuf.generated.RPCProtos.ConnectionHeader;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.ConnectionHeader;
 import org.apache.hadoop.hbase.security.NettyHBaseSaslRpcClientHandler;
 import org.apache.hadoop.hbase.security.SaslChallengeDecoder;
 import org.apache.hadoop.hbase.security.SaslUtil.QualityOfProtection;
@@ -247,12 +247,12 @@ class NettyRpcConnection extends RpcConnection {
   }
 
   @Override
-  public synchronized void sendRequest(final Call call, HBaseRpcController 
pcrc)
+  public synchronized void sendRequest(final Call call, HBaseRpcController hrc)
   throws IOException {
 if (reloginInProgress) {
   throw new IOException("Can not send request because relogin is in 
progress.");
 }
-pcrc.notifyOnCancel(new RpcCallback() {
+hrc.notifyOnCancel(new RpcCallback() {
 
   @Override
   public void run(Object parameter) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/95c1dc93/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcDuplexHandler.java
--
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcDuplexHandler.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcDuplexHandler.java
index 5faaede..a302d48 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcDuplexHandler.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcDuplexHandler.java
@@ -17,9 +17,9 @@
  */
 package org.apache.hadoop.hbase.ipc;
 
-import com.google.protobuf.Message;
-import com.google.protobuf.Message.Builder;
-import com.google.protobuf.TextFormat;
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.Message;
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.Message.Builder;
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.TextFormat;
 
 import io.netty.buffer.ByteBuf;
 import io.netty.buffer.ByteBufInputStream;
@@ -39,10 +39,10 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.CellScanner;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.codec.Codec;
-import org.apache.hadoop.hbase.protobuf.generated.RPCProtos.CellBlockMeta;
-import org.apache.hadoop.hbase.protobuf.generated.RPCProtos.ExceptionResponse;
-import org.apache.hadoop.hbase.protobuf.generated.RPCProtos.RequestHeader;
-import org.apache.hadoop.hbase.protobuf.generated.RPCProtos.ResponseHeader;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.CellBlockMeta;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.ExceptionResponse;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.RequestHeader;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.ResponseHeader;
 import org.apache.hadoop.io.compress.CompressionCodec;
 import org.apache.hadoop.ipc.RemoteException;
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/95c1dc93/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RegionCoprocessorRpcChannel.java
--
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RegionCoprocessorRpcChannel.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RegionCoprocessorRpcChannel.java
deleted file mode 100644
index 209deed..0000000
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RegionCoprocessorRpcChannel.java
+++ /dev/null
@@ -1,121 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed 

[31/77] [abbrv] [partial] hbase git commit: HBASE-15638 Shade protobuf Which includes

2016-10-07 Thread syuanjiang
http://git-wip-us.apache.org/repos/asf/hbase/blob/95c1dc93/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/security/access/SecureBulkLoadEndpoint.java
--
diff --git a/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/security/access/SecureBulkLoadEndpoint.java b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/security/access/SecureBulkLoadEndpoint.java
new file mode 100644
index 0000000..1849d90
--- /dev/null
+++ b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/security/access/SecureBulkLoadEndpoint.java
@@ -0,0 +1,171 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.security.access;
+
+import java.io.IOException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.Coprocessor;
+import org.apache.hadoop.hbase.CoprocessorEnvironment;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.coprocessor.CoprocessorService;
+import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
+import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileRequest;
+import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CleanupBulkLoadRequest;
+import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CleanupBulkLoadResponse;
+import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.PrepareBulkLoadRequest;
+import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.PrepareBulkLoadResponse;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
+import org.apache.hadoop.hbase.protobuf.generated.SecureBulkLoadProtos.SecureBulkLoadHFilesRequest;
+import org.apache.hadoop.hbase.protobuf.generated.SecureBulkLoadProtos.SecureBulkLoadHFilesResponse;
+import org.apache.hadoop.hbase.protobuf.generated.SecureBulkLoadProtos.SecureBulkLoadService;
+import org.apache.hadoop.hbase.regionserver.SecureBulkLoadManager;
+
+import com.google.protobuf.RpcCallback;
+import com.google.protobuf.RpcController;
+import com.google.protobuf.Service;
+
+/**
+ * Coprocessor service for bulk loads in secure mode.
+ * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0
+ */
+@InterfaceAudience.Private
+@Deprecated
+public class SecureBulkLoadEndpoint extends SecureBulkLoadService
+implements CoprocessorService, Coprocessor {
+
+  public static final long VERSION = 0L;
+
+  private static final Log LOG = LogFactory.getLog(SecureBulkLoadEndpoint.class);
+
+  private RegionCoprocessorEnvironment env;
+
+  @Override
+  public void start(CoprocessorEnvironment env) {
+this.env = (RegionCoprocessorEnvironment)env;
+LOG.warn("SecureBulkLoadEndpoint is deprecated. It will be removed in 
future releases.");
+LOG.warn("Secure bulk load has been integrated into HBase core.");
+  }
+
+  @Override
+  public void stop(CoprocessorEnvironment env) throws IOException {
+  }
+
+  @Override
+  public void prepareBulkLoad(RpcController controller, PrepareBulkLoadRequest request,
+  RpcCallback<PrepareBulkLoadResponse> done) {
+try {
+  SecureBulkLoadManager secureBulkLoadManager =
+  this.env.getRegionServerServices().getSecureBulkLoadManager();
+  String bulkToken = secureBulkLoadManager.prepareBulkLoad(this.env.getRegion(),
+  convert(request));
+  done.run(PrepareBulkLoadResponse.newBuilder().setBulkToken(bulkToken).build());
+} catch (IOException e) {
+  CoprocessorRpcUtils.setControllerException(controller, e);
+}
+done.run(null);
+  }
+
+  org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.PrepareBulkLoadRequest
+convert(PrepareBulkLoadRequest request)
+  throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+byte [] bytes = request.toByteArray();
+org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.PrepareBulkLoadRequest.Builder
+  
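The convert() helper above is the bridge between the old, unshaded protobuf classes that this deprecated endpoint still exposes and the shaded classes used by SecureBulkLoadManager: serialize the request with one runtime, then re-read the bytes with the other. A minimal sketch of that round-trip, not taken from the commit (it uses parseFrom where the helper above goes through a Builder), assuming only the generated classes already named in the imports:

  // Sketch only: the unshaded and shaded classes describe the same message,
  // so bytes written by one protobuf runtime can be re-parsed by the other.
  byte[] wire = request.toByteArray();   // unshaded ClientProtos.PrepareBulkLoadRequest
  org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.PrepareBulkLoadRequest shadedRequest =
      org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.PrepareBulkLoadRequest.parseFrom(wire);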

[14/77] [abbrv] [partial] hbase git commit: HBASE-15638 Shade protobuf Which includes

2016-10-07 Thread syuanjiang
http://git-wip-us.apache.org/repos/asf/hbase/blob/95c1dc93/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/FloatValue.java
--
diff --git a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/FloatValue.java b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/FloatValue.java
new file mode 100644
index 000..656ebb6
--- /dev/null
+++ b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/FloatValue.java
@@ -0,0 +1,454 @@
+// Generated by the protocol buffer compiler.  DO NOT EDIT!
+// source: google/protobuf/wrappers.proto
+
+package org.apache.hadoop.hbase.shaded.com.google.protobuf;
+
+/**
+ * 
+ * Wrapper message for `float`.
+ * The JSON representation for `FloatValue` is JSON number.
+ * 
+ *
+ * Protobuf type {@code google.protobuf.FloatValue}
+ */
+public  final class FloatValue extends
+org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 implements
+// @@protoc_insertion_point(message_implements:google.protobuf.FloatValue)
+FloatValueOrBuilder {
+  // Use FloatValue.newBuilder() to construct.
+  private FloatValue(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder builder) {
+super(builder);
+  }
+  private FloatValue() {
+value_ = 0F;
+  }
+
+  @java.lang.Override
+  public final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet
+  getUnknownFields() {
+return org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.getDefaultInstance();
+  }
+  private FloatValue(
+  org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+  org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+  throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+this();
+int mutable_bitField0_ = 0;
+try {
+  boolean done = false;
+  while (!done) {
+int tag = input.readTag();
+switch (tag) {
+  case 0:
+done = true;
+break;
+  default: {
+if (!input.skipField(tag)) {
+  done = true;
+}
+break;
+  }
+  case 13: {
+
+value_ = input.readFloat();
+break;
+  }
+}
+  }
+} catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
+  throw e.setUnfinishedMessage(this);
+} catch (java.io.IOException e) {
+  throw new org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException(
+  e).setUnfinishedMessage(this);
+} finally {
+  makeExtensionsImmutable();
+}
+  }
+  public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+  getDescriptor() {
+return org.apache.hadoop.hbase.shaded.com.google.protobuf.WrappersProto.internal_static_google_protobuf_FloatValue_descriptor;
+  }
+
+  protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+  internalGetFieldAccessorTable() {
+return org.apache.hadoop.hbase.shaded.com.google.protobuf.WrappersProto.internal_static_google_protobuf_FloatValue_fieldAccessorTable
+.ensureFieldAccessorsInitialized(
+org.apache.hadoop.hbase.shaded.com.google.protobuf.FloatValue.class, org.apache.hadoop.hbase.shaded.com.google.protobuf.FloatValue.Builder.class);
+  }
+
+  public static final int VALUE_FIELD_NUMBER = 1;
+  private float value_;
+  /**
+   * 
+   * The float value.
+   * 
+   *
+   * optional float value = 1;
+   */
+  public float getValue() {
+return value_;
+  }
+
+  private byte memoizedIsInitialized = -1;
+  public final boolean isInitialized() {
+byte isInitialized = memoizedIsInitialized;
+if (isInitialized == 1) return true;
+if (isInitialized == 0) return false;
+
+memoizedIsInitialized = 1;
+return true;
+  }
+
+  public void writeTo(org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream output)
+  throws java.io.IOException {
+if (value_ != 0F) {
+  output.writeFloat(1, value_);
+}
+  }
+
+  public int getSerializedSize() {
+int size = memoizedSize;
+if (size != -1) return size;
+
+size = 0;
+if (value_ != 0F) {
+  size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream
+.computeFloatSize(1, value_);
+}
+memoizedSize = size;
+return size;
+  }
+
+  private static final long serialVersionUID = 0L;
+  @java.lang.Override
+  public boolean equals(final java.lang.Object obj) {
+if (obj == this) {
+ return true;
+}
+if (!(obj instanceof org.apache.hadoop.hbase.shaded.com.google.protobuf.FloatValue)) {
+  return 

[62/77] [abbrv] hbase git commit: HBASE-16758 back HBaseZeroCopyByteStringer stuff.

2016-10-07 Thread syuanjiang
HBASE-16758  back HBaseZeroCopyByteStringer stuff.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/617dfe18
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/617dfe18
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/617dfe18

Branch: refs/heads/hbase-12439
Commit: 617dfe18cdc287ea5886e5a9567c9abcd6c0fa28
Parents: 6a9b57b
Author: anoopsamjohn 
Authored: Wed Oct 5 12:57:13 2016 +0530
Committer: anoopsamjohn 
Committed: Wed Oct 5 12:57:13 2016 +0530

--
 .../coprocessor/BigDecimalColumnInterpreter.java   |  5 ++---
 .../hbase/security/access/AccessControlUtil.java   | 17 +
 .../security/visibility/VisibilityClient.java  |  7 ---
 .../FanOutOneBlockAsyncDFSOutputSaslHelper.java|  4 ++--
 .../hadoop/hbase/security/token/TokenUtil.java |  5 +++--
 .../security/visibility/VisibilityController.java  |  4 ++--
 .../hbase/security/visibility/VisibilityUtils.java |  6 ++
 7 files changed, 24 insertions(+), 24 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/617dfe18/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/BigDecimalColumnInterpreter.java
--
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/BigDecimalColumnInterpreter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/BigDecimalColumnInterpreter.java
index 7d08b7e..9036273 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/BigDecimalColumnInterpreter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/BigDecimalColumnInterpreter.java
@@ -30,10 +30,9 @@ import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.hbase.coprocessor.ColumnInterpreter;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BigDecimalMsg;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.EmptyMsg;
+import org.apache.hadoop.hbase.util.ByteStringer;
 import org.apache.hadoop.hbase.util.Bytes;
 
-import com.google.protobuf.ByteString;
-
 /**
  * ColumnInterpreter for doing Aggregation's with BigDecimal columns. This class
  * is required at the RegionServer also.
@@ -124,7 +123,7 @@ public class BigDecimalColumnInterpreter extends ColumnInterpreter
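The import swap above moves this interpreter from a direct com.google.protobuf.ByteString to the org.apache.hadoop.hbase.util.ByteStringer helper. A rough usage sketch of the pattern this enables, with the method and field names treated as assumptions rather than taken from the diff:

  // Assumed API: ByteStringer.wrap() hands the byte[] to the protobuf message
  // without the defensive copy that ByteString.copyFrom() would make.
  byte[] raw = Bytes.toBytes(new BigDecimal("12.5"));
  BigDecimalMsg msg = BigDecimalMsg.newBuilder()
      .setBigdecimalMsg(ByteStringer.wrap(raw))
      .build();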

[56/77] [abbrv] hbase git commit: HBASE-16742 Add chapter for devs on how we do protobufs going forward; ADDENDUM -- put all notes on CPEPs together in the CPEP section

2016-10-07 Thread syuanjiang
HBASE-16742 Add chapter for devs on how we do protobufs going forward; ADDENDUM -- put all notes on CPEPs together in the CPEP section


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/b30b6dff
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/b30b6dff
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/b30b6dff

Branch: refs/heads/hbase-12439
Commit: b30b6dffe177fb06f33401efd2c22007026a044e
Parents: b952e64
Author: stack 
Authored: Tue Oct 4 08:44:46 2016 -0700
Committer: stack 
Committed: Tue Oct 4 08:44:46 2016 -0700

--
 src/main/asciidoc/_chapters/cp.adoc   | 16 +++-
 src/main/asciidoc/_chapters/protobuf.adoc | 17 -
 2 files changed, 15 insertions(+), 18 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/b30b6dff/src/main/asciidoc/_chapters/cp.adoc
--
diff --git a/src/main/asciidoc/_chapters/cp.adoc b/src/main/asciidoc/_chapters/cp.adoc
index a9620d3..1817dd3 100644
--- a/src/main/asciidoc/_chapters/cp.adoc
+++ b/src/main/asciidoc/_chapters/cp.adoc
@@ -184,13 +184,15 @@ WalObserver::
 <> provides working examples of observer coprocessors.
 
 
+
+[[cpeps]]
 === Endpoint Coprocessor
 
 Endpoint processors allow you to perform computation at the location of the data.
 See <>. An example is the need to calculate a running
 average or summation for an entire table which spans hundreds of regions.
 
-In contract to observer coprocessors, where your code is run transparently, endpoint
+In contrast to observer coprocessors, where your code is run transparently, endpoint
 coprocessors must be explicitly invoked using the
 
link:https://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/client/Table.html#coprocessorService%28java.lang.Class,%20byte%5B%5D,%20byte%5B%5D,%20org.apache.hadoop.hbase.client.coprocessor.Batch.Call%29[CoprocessorService()]
 method available in
@@ -208,6 +210,18 @@ link:https://issues.apache.org/jira/browse/HBASE-5448[HBASE-5448]). To upgrade y
 HBase cluster from 0.94 or earlier to 0.96 or later, you need to reimplement your
 coprocessor.
 
+Coprocessor Endpoints should make no use of HBase internals and
+only avail of public APIs; ideally a CPEP should depend on Interfaces
+and data structures only. This is not always possible but beware
+that doing so makes the Endpoint brittle, liable to breakage as HBase
+internals evolve. HBase internal APIs annotated as private or evolving
+do not have to respect semantic versioning rules or general java rules on
+deprecation before removal. While generated protobuf files are
+absent the hbase audience annotations -- they are created by the
+protobuf protoc tool which knows nothing of how HBase works --
+they should be considered `@InterfaceAudience.Private` so are liable to
+change.
+
 <> provides working examples of endpoint coprocessors.
 
 [[cp_loading]]
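As a rough sketch of the invocation path described in the section above (not part of this commit; RowCountService and its messages are the example endpoint shipped with hbase-examples, and the exact names here are assumptions), a client that stays on public APIs only might look like:

  // Hedged sketch: call an Endpoint through the public Table API and a
  // generated blocking stub; no HBase internals are touched.
  static long countRowsInRegion(Connection conn, TableName tn, byte[] row) throws Exception {
    try (Table table = conn.getTable(tn)) {
      CoprocessorRpcChannel channel = table.coprocessorService(row);
      ExampleProtos.RowCountService.BlockingInterface stub =
          ExampleProtos.RowCountService.newBlockingStub(channel);
      return stub.getRowCount(null, ExampleProtos.CountRequest.getDefaultInstance()).getCount();
    }
  }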

http://git-wip-us.apache.org/repos/asf/hbase/blob/b30b6dff/src/main/asciidoc/_chapters/protobuf.adoc
--
diff --git a/src/main/asciidoc/_chapters/protobuf.adoc b/src/main/asciidoc/_chapters/protobuf.adoc
index 4181878..fa63127 100644
--- a/src/main/asciidoc/_chapters/protobuf.adoc
+++ b/src/main/asciidoc/_chapters/protobuf.adoc
@@ -73,23 +73,6 @@ CPEP Service. Going forward, after the release of hbase-2.0.0, this
 practice needs to whither. We'll make plain why in the later
 xref:shaded.protobuf[hbase-2.0.0] section.
 
-[[cpeps]]
-=== Coprocessor Endpoints (CPEPs)
-xref:cp:[Coprocessor Endpoints] are custom API a developer can
-add to HBase. Protobufs are used to describe the methods and arguments
-that comprise the new Service.
-Coprocessor Endpoints should make no use of HBase internals and
-only avail of public APIs; ideally a CPEP should depend on Interfaces
-and data structures only. This is not always possible but beware
-that doing so makes the Endpoint brittle, liable to breakage as HBase
-internals evolve. HBase internal APIs annotated as private or evolving
-do not have to respect semantic versioning rules or general java rules on
-deprecation before removal. While generated protobuf files are
-absent the hbase audience annotations -- they are created by the
-protobuf protoc tool which knows nothing of how HBase works --
-they should be consided `@InterfaceAudience.Private` so are liable to
-change.
-
 [[shaded.protobuf]]
 === hbase-2.0.0 and the shading of protobufs (HBASE-15638)
 



[40/77] [abbrv] [partial] hbase git commit: HBASE-15638 Shade protobuf Which includes

2016-10-07 Thread syuanjiang
http://git-wip-us.apache.org/repos/asf/hbase/blob/95c1dc93/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationClient.java
--
diff --git a/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationClient.java b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationClient.java
new file mode 100644
index 000..cde7d41
--- /dev/null
+++ b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationClient.java
@@ -0,0 +1,864 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.client.coprocessor;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.lang.reflect.InvocationTargetException;
+import java.lang.reflect.Method;
+import java.lang.reflect.ParameterizedType;
+import java.lang.reflect.Type;
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.NavigableMap;
+import java.util.NavigableSet;
+import java.util.TreeMap;
+import java.util.concurrent.atomic.AtomicLong;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.coprocessor.ColumnInterpreter;
+import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
+import org.apache.hadoop.hbase.ipc.ServerRpcController;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateRequest;
+import org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateResponse;
+import org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateService;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Pair;
+
+import com.google.protobuf.ByteString;
+import com.google.protobuf.Message;
+
+/**
+ * This client class is for invoking the aggregate functions deployed on the
+ * Region Server side via the AggregateService. This class will implement the
+ * supporting functionality for summing/processing the individual results
+ * obtained from the AggregateService for each region.
+ * 
+ * This will serve as the client side handler for invoking the aggregate
+ * functions.
+ * For all aggregate functions,
+ * 
+ * start row < end row is an essential condition (if they are not
+ * {@link HConstants#EMPTY_BYTE_ARRAY})
+ * Column family can't be null. In case where multiple families are
+ * provided, an IOException will be thrown. An optional column qualifier can
+ * also be defined.
+ * For methods to find maximum, minimum, sum, rowcount, it returns the
+ * parameter type. For average and std, it returns a double value. For row
+ * count, it returns a long value.
+ * 
+ * Call {@link #close()} when done.
+ */
+@InterfaceAudience.Private
+public class AggregationClient implements Closeable {
+  // TODO: This class is not used.  Move to examples?
+  private static final Log log = LogFactory.getLog(AggregationClient.class);
+  private final Connection connection;
+
+  /**
+   * Constructor with Conf object
+   * @param cfg
+   */
+  public AggregationClient(Configuration cfg) {
+try {
+  // Create a connection on construction. Will use it making each of the calls below.
+  this.connection = ConnectionFactory.createConnection(cfg);
+} catch (IOException e) {
+  throw new RuntimeException(e);
+}
+  }
+
+  @Override
+  public void close() throws IOException {
+if (this.connection != null && !this.connection.isClosed()) {
+  this.connection.close();
+}
+  }
+
+  /**
+   * It gives the 
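A rough usage sketch of this client (not part of the commit; rowCount and LongColumnInterpreter are assumed from the surrounding aggregation endpoint code, and rowCount is declared to throw Throwable in this era of the API, which a caller must handle):

  // Hedged sketch: count rows through the AggregateService endpoint.
  Configuration conf = HBaseConfiguration.create();
  try (AggregationClient aggClient = new AggregationClient(conf)) {
    Scan scan = new Scan();
    scan.addFamily(Bytes.toBytes("cf"));   // exactly one family, per the javadoc above
    long rows = aggClient.rowCount(TableName.valueOf("t1"), new LongColumnInterpreter(), scan);
  }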

[43/77] [abbrv] [partial] hbase git commit: HBASE-15638 Shade protobuf Which includes

2016-10-07 Thread syuanjiang
http://git-wip-us.apache.org/repos/asf/hbase/blob/95c1dc93/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
--
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
new file mode 100644
index 000..2f72eaa
--- /dev/null
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
@@ -0,0 +1,3161 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.shaded.protobuf;
+
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.InterruptedIOException;
+import java.lang.reflect.Constructor;
+import java.lang.reflect.Method;
+import java.nio.ByteBuffer;
+import java.security.PrivilegedExceptionAction;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Locale;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.NavigableSet;
+import java.util.Set;
+import java.util.concurrent.Callable;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellScanner;
+import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.ClusterId;
+import org.apache.hadoop.hbase.ClusterStatus;
+import org.apache.hadoop.hbase.DoNotRetryIOException;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HBaseIOException;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.NamespaceDescriptor;
+import org.apache.hadoop.hbase.ServerLoad;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.Tag;
+import org.apache.hadoop.hbase.TagUtil;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.Append;
+import org.apache.hadoop.hbase.client.CompactionState;
+import org.apache.hadoop.hbase.client.Consistency;
+import org.apache.hadoop.hbase.client.Delete;
+import org.apache.hadoop.hbase.client.Durability;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.Increment;
+import org.apache.hadoop.hbase.client.Mutation;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.RegionLoadStats;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.SnapshotDescription;
+import org.apache.hadoop.hbase.client.SnapshotType;
+import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
+import org.apache.hadoop.hbase.client.security.SecurityCapability;
+import org.apache.hadoop.hbase.exceptions.DeserializationException;
+import org.apache.hadoop.hbase.filter.ByteArrayComparable;
+import org.apache.hadoop.hbase.filter.Filter;
+import org.apache.hadoop.hbase.io.LimitInputStream;
+import org.apache.hadoop.hbase.io.TimeRange;
+import org.apache.hadoop.hbase.master.RegionState;
+import org.apache.hadoop.hbase.protobuf.ProtobufMagic;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos;
+import org.apache.hadoop.hbase.quotas.QuotaScope;
+import org.apache.hadoop.hbase.quotas.QuotaType;
+import org.apache.hadoop.hbase.quotas.ThrottleType;
+import org.apache.hadoop.hbase.replication.ReplicationLoadSink;
+import org.apache.hadoop.hbase.replication.ReplicationLoadSource;
+import org.apache.hadoop.hbase.security.User;
+import org.apache.hadoop.hbase.security.visibility.Authorizations;
+import org.apache.hadoop.hbase.security.visibility.CellVisibility;
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString;
+import 

[35/77] [abbrv] [partial] hbase git commit: HBASE-15638 Shade protobuf Which includes

2016-10-07 Thread syuanjiang
http://git-wip-us.apache.org/repos/asf/hbase/blob/95c1dc93/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/coprocessor/protobuf/generated/DummyRegionServerEndpointProtos.java
--
diff --git a/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/coprocessor/protobuf/generated/DummyRegionServerEndpointProtos.java b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/coprocessor/protobuf/generated/DummyRegionServerEndpointProtos.java
new file mode 100644
index 000..a011b30
--- /dev/null
+++ b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/coprocessor/protobuf/generated/DummyRegionServerEndpointProtos.java
@@ -0,0 +1,1225 @@
+// Generated by the protocol buffer compiler.  DO NOT EDIT!
+// source: DummyRegionServerEndpoint.proto
+
+package org.apache.hadoop.hbase.coprocessor.protobuf.generated;
+
+public final class DummyRegionServerEndpointProtos {
+  private DummyRegionServerEndpointProtos() {}
+  public static void registerAllExtensions(
+  com.google.protobuf.ExtensionRegistry registry) {
+  }
+  public interface DummyRequestOrBuilder
+  extends com.google.protobuf.MessageOrBuilder {
+  }
+  /**
+   * Protobuf type {@code hbase.test.pb.DummyRequest}
+   */
+  public static final class DummyRequest extends
+  com.google.protobuf.GeneratedMessage
+  implements DummyRequestOrBuilder {
+// Use DummyRequest.newBuilder() to construct.
+private DummyRequest(com.google.protobuf.GeneratedMessage.Builder builder) {
+  super(builder);
+  this.unknownFields = builder.getUnknownFields();
+}
+private DummyRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+private static final DummyRequest defaultInstance;
+public static DummyRequest getDefaultInstance() {
+  return defaultInstance;
+}
+
+public DummyRequest getDefaultInstanceForType() {
+  return defaultInstance;
+}
+
+private final com.google.protobuf.UnknownFieldSet unknownFields;
+@java.lang.Override
+public final com.google.protobuf.UnknownFieldSet
+getUnknownFields() {
+  return this.unknownFields;
+}
+private DummyRequest(
+com.google.protobuf.CodedInputStream input,
+com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+throws com.google.protobuf.InvalidProtocolBufferException {
+  initFields();
+  com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+  com.google.protobuf.UnknownFieldSet.newBuilder();
+  try {
+boolean done = false;
+while (!done) {
+  int tag = input.readTag();
+  switch (tag) {
+case 0:
+  done = true;
+  break;
+default: {
+  if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+done = true;
+  }
+  break;
+}
+  }
+}
+  } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+throw e.setUnfinishedMessage(this);
+  } catch (java.io.IOException e) {
+throw new com.google.protobuf.InvalidProtocolBufferException(
+e.getMessage()).setUnfinishedMessage(this);
+  } finally {
+this.unknownFields = unknownFields.build();
+makeExtensionsImmutable();
+  }
+}
+public static final com.google.protobuf.Descriptors.Descriptor
+getDescriptor() {
+  return org.apache.hadoop.hbase.coprocessor.protobuf.generated.DummyRegionServerEndpointProtos.internal_static_hbase_test_pb_DummyRequest_descriptor;
+}
+
+protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+internalGetFieldAccessorTable() {
+  return org.apache.hadoop.hbase.coprocessor.protobuf.generated.DummyRegionServerEndpointProtos.internal_static_hbase_test_pb_DummyRequest_fieldAccessorTable
+  .ensureFieldAccessorsInitialized(
+  org.apache.hadoop.hbase.coprocessor.protobuf.generated.DummyRegionServerEndpointProtos.DummyRequest.class, org.apache.hadoop.hbase.coprocessor.protobuf.generated.DummyRegionServerEndpointProtos.DummyRequest.Builder.class);
+}
+
+public static com.google.protobuf.Parser PARSER =
+new com.google.protobuf.AbstractParser() {
+  public DummyRequest parsePartialFrom(
+  com.google.protobuf.CodedInputStream input,
+  com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+  throws com.google.protobuf.InvalidProtocolBufferException {
+return new DummyRequest(input, extensionRegistry);
+  }
+};
+
+@java.lang.Override
+public com.google.protobuf.Parser getParserForType() {
+  return PARSER;
+}
+
+private void initFields() {
+}
+private byte memoizedIsInitialized = -1;
+public final boolean isInitialized() {
+  byte isInitialized = 

[71/77] [abbrv] hbase git commit: HBASE-16750 hbase compilation failed on power system (Saravanan Krishnamoorthy)

2016-10-07 Thread syuanjiang
HBASE-16750 hbase compilation failed on power system (Saravanan Krishnamoorthy)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/d1e40bf0
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/d1e40bf0
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/d1e40bf0

Branch: refs/heads/hbase-12439
Commit: d1e40bf0bda4d82ab217e6b715e7c4dd5a6b9af2
Parents: 912ed17
Author: tedyu 
Authored: Thu Oct 6 06:48:40 2016 -0700
Committer: tedyu 
Committed: Thu Oct 6 06:48:40 2016 -0700

--
 pom.xml | 6 --
 1 file changed, 4 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/d1e40bf0/pom.xml
--
diff --git a/pom.xml b/pom.xml
index 2d341c0..7715278 100644
--- a/pom.xml
+++ b/pom.xml
@@ -1009,7 +1009,7 @@
   
 org.asciidoctor
 asciidoctorj-pdf
-1.5.0-alpha.6
+1.5.0-alpha.11
   
 
 
@@ -1019,6 +1019,8 @@
   coderay
   
 ${project.version}
+${project.build.sourceDirectory}
+
   
 
 
@@ -1233,7 +1235,7 @@
 1.3.9-1
 6.18
 2.10.3
-1.5.2.1
+1.5.3
 
 /usr
 /etc/hbase



[70/77] [abbrv] hbase git commit: HBASE-15721 Optimization in cloning cells into MSLAB.

2016-10-07 Thread syuanjiang
HBASE-15721 Optimization in cloning cells into MSLAB.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/912ed172
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/912ed172
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/912ed172

Branch: refs/heads/hbase-12439
Commit: 912ed1728683e68cf06c9a30cde0d0f89e87a880
Parents: 58e843d
Author: anoopsamjohn 
Authored: Thu Oct 6 14:48:03 2016 +0530
Committer: anoopsamjohn 
Committed: Thu Oct 6 14:48:03 2016 +0530

--
 .../java/org/apache/hadoop/hbase/CellUtil.java  |  9 +++
 .../org/apache/hadoop/hbase/ExtendedCell.java   |  7 ++
 .../java/org/apache/hadoop/hbase/KeyValue.java  |  5 ++
 .../org/apache/hadoop/hbase/KeyValueUtil.java   | 37 --
 .../apache/hadoop/hbase/OffheapKeyValue.java|  5 ++
 .../io/encoding/BufferedDataBlockEncoder.java   | 12 
 .../hbase/regionserver/HeapMemStoreLAB.java | 26 +++
 .../hadoop/hbase/regionserver/MemStoreLAB.java  | 20 +++---
 .../hadoop/hbase/regionserver/Segment.java  | 15 +---
 .../regionserver/TestMemStoreChunkPool.java | 34 +
 .../hbase/regionserver/TestMemStoreLAB.java | 74 
 11 files changed, 158 insertions(+), 86 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/912ed172/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java
--
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java
index 097b11b..7988352 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java
@@ -543,6 +543,15 @@ public final class CellUtil {
   }
   return len;
 }
+
+@Override
+public void write(byte[] buf, int offset) {
+  offset = KeyValueUtil.appendToByteArray(this.cell, buf, offset, false);
+  int tagsLen = this.tags.length;
+  assert tagsLen > 0;
+  offset = Bytes.putAsShort(buf, offset, tagsLen);
+  System.arraycopy(this.tags, 0, buf, offset, tagsLen);
+}
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hbase/blob/912ed172/hbase-common/src/main/java/org/apache/hadoop/hbase/ExtendedCell.java
--
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/ExtendedCell.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/ExtendedCell.java
index 51639da..420a5f9 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/ExtendedCell.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/ExtendedCell.java
@@ -59,4 +59,11 @@ public interface ExtendedCell extends Cell, SettableSequenceId, SettableTimestam
*/
   // TODO remove the boolean param once HBASE-16706 is done.
   int getSerializedSize(boolean withTags);
+
+  /**
+   * Write the given Cell into the given buf's offset.
+   * @param buf The buffer where to write the Cell.
+   * @param offset The offset within buffer, to write the Cell.
+   */
+  void write(byte[] buf, int offset);
 }
\ No newline at end of file
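Taken together, the two methods in the hunk above let a caller size a destination buffer with getSerializedSize() and then have the cell serialize itself with write(), which is what the MSLAB cloning path in this change relies on. A minimal sketch, assuming the cell's runtime type is ExtendedCell (the helper name is an illustration, not from the commit):

  // Minimal sketch: flat-copy a cell using getSerializedSize() + write().
  static byte[] copyCell(ExtendedCell cell) {
    byte[] buf = new byte[cell.getSerializedSize(true)];   // true = include tags
    cell.write(buf, 0);                                     // the cell writes itself at offset 0
    return buf;
  }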

http://git-wip-us.apache.org/repos/asf/hbase/blob/912ed172/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java
--
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java
index 47fad86..8f8554c 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java
@@ -2492,6 +2492,11 @@ public class KeyValue implements ExtendedCell {
    return this.getKeyLength() + this.getValueLength() + KEYVALUE_INFRASTRUCTURE_SIZE;
   }
 
+  @Override
+  public void write(byte[] buf, int offset) {
+System.arraycopy(this.bytes, this.offset, buf, offset, this.length);
+  }
+
   /**
* Comparator that compares row component only of a KeyValue.
*/

http://git-wip-us.apache.org/repos/asf/hbase/blob/912ed172/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueUtil.java
--
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueUtil.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueUtil.java
index 7b9bcb1..39b3653 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueUtil.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueUtil.java
@@ -136,7 +136,7 @@ public class KeyValueUtil {
   public static byte[] copyToNewByteArray(final Cell cell) {
 int v1Length = length(cell);
 

[65/77] [abbrv] hbase git commit: HBASE-16753 There is a mismatch between suggested Java version in hbase-env.sh

2016-10-07 Thread syuanjiang
HBASE-16753 There is a mismatch between suggested Java version in hbase-env.sh

Signed-off-by: Dima Spivak 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/1f1a13f2
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/1f1a13f2
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/1f1a13f2

Branch: refs/heads/hbase-12439
Commit: 1f1a13f2e2a28eb818cd85b6c50e47b52aaa2c2e
Parents: 3aa4dfa
Author: Umesh Agashe 
Authored: Mon Oct 3 14:02:28 2016 -0700
Committer: Dima Spivak 
Committed: Wed Oct 5 10:16:41 2016 -0700

--
 bin/hbase-config.sh| 2 +-
 conf/hbase-env.cmd | 2 +-
 conf/hbase-env.sh  | 4 ++--
 src/main/asciidoc/_chapters/configuration.adoc | 2 +-
 4 files changed, 5 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/1f1a13f2/bin/hbase-config.sh
--
diff --git a/bin/hbase-config.sh b/bin/hbase-config.sh
index d7d7e6f..bf4ee92 100644
--- a/bin/hbase-config.sh
+++ b/bin/hbase-config.sh
@@ -132,7 +132,7 @@ if [ -z "$JAVA_HOME" ]; then
 | Please download the latest Sun JDK from the Sun Java web site|
 | > http://www.oracle.com/technetwork/java/javase/downloads|
 |  |
-| HBase requires Java 1.7 or later.|
+| HBase requires Java 1.8 or later.|
 +==+
 EOF
 exit 1

http://git-wip-us.apache.org/repos/asf/hbase/blob/1f1a13f2/conf/hbase-env.cmd
--
diff --git a/conf/hbase-env.cmd b/conf/hbase-env.cmd
index d16de55..8c8597e 100644
--- a/conf/hbase-env.cmd
+++ b/conf/hbase-env.cmd
@@ -18,7 +18,7 @@
 
 @rem Set environment variables here.
 
-@rem The java implementation to use.  Java 1.7+ required.
+@rem The java implementation to use.  Java 1.8+ required.
 @rem set JAVA_HOME=c:\apps\java
 
 @rem Extra Java CLASSPATH elements.  Optional.

http://git-wip-us.apache.org/repos/asf/hbase/blob/1f1a13f2/conf/hbase-env.sh
--
diff --git a/conf/hbase-env.sh b/conf/hbase-env.sh
index 31e8441..d9879c6 100644
--- a/conf/hbase-env.sh
+++ b/conf/hbase-env.sh
@@ -24,8 +24,8 @@
 # so try to keep things idempotent unless you want to take an even deeper look
 # into the startup scripts (bin/hbase, etc.)
 
-# The java implementation to use.  Java 1.7+ required.
-# export JAVA_HOME=/usr/java/jdk1.6.0/
+# The java implementation to use.  Java 1.8+ required.
+# export JAVA_HOME=/usr/java/jdk1.8.0/
 
 # Extra Java CLASSPATH elements.  Optional.
 # export HBASE_CLASSPATH=

http://git-wip-us.apache.org/repos/asf/hbase/blob/1f1a13f2/src/main/asciidoc/_chapters/configuration.adoc
--
diff --git a/src/main/asciidoc/_chapters/configuration.adoc b/src/main/asciidoc/_chapters/configuration.adoc
index 4804332..048b047 100644
--- a/src/main/asciidoc/_chapters/configuration.adoc
+++ b/src/main/asciidoc/_chapters/configuration.adoc
@@ -729,7 +729,7 @@ The following lines in the _hbase-env.sh_ file show how to set the `JAVA_HOME` e
 
 
 # The java implementation to use.
-export JAVA_HOME=/usr/java/jdk1.7.0/
+export JAVA_HOME=/usr/java/jdk1.8.0/
 
 # The maximum amount of heap to use. Default is left to JVM default.
 export HBASE_HEAPSIZE=4G



[55/77] [abbrv] hbase git commit: HBASE-15560 TinyLFU-based BlockCache - revert pending performance verification

2016-10-07 Thread syuanjiang
HBASE-15560 TinyLFU-based BlockCache - revert pending performance verification


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/b952e647
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/b952e647
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/b952e647

Branch: refs/heads/hbase-12439
Commit: b952e64751d309e920bf6e44caa2b3d5801e3be8
Parents: b5d34cf
Author: tedyu 
Authored: Tue Oct 4 08:37:29 2016 -0700
Committer: tedyu 
Committed: Tue Oct 4 08:37:29 2016 -0700

--
 .../src/main/resources/hbase-default.xml|   5 -
 .../src/main/resources/supplemental-models.xml  |  16 +-
 hbase-server/pom.xml|   4 -
 .../hadoop/hbase/io/hfile/CacheConfig.java  |  91 ++---
 .../hbase/io/hfile/CombinedBlockCache.java  |  48 +--
 .../hbase/io/hfile/FirstLevelBlockCache.java|  45 ---
 .../io/hfile/InclusiveCombinedBlockCache.java   |   6 +-
 .../hadoop/hbase/io/hfile/LruBlockCache.java|  36 +-
 .../hbase/io/hfile/TinyLfuBlockCache.java   | 402 ---
 .../hbase/io/hfile/bucket/BucketCache.java  |   5 +-
 .../hadoop/hbase/io/hfile/TestCacheConfig.java  |   8 +-
 .../hbase/io/hfile/TestTinyLfuBlockCache.java   | 304 --
 pom.xml |   6 -
 13 files changed, 79 insertions(+), 897 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/b952e647/hbase-common/src/main/resources/hbase-default.xml
--
diff --git a/hbase-common/src/main/resources/hbase-default.xml b/hbase-common/src/main/resources/hbase-default.xml
index 3b7b05e..4f769cb 100644
--- a/hbase-common/src/main/resources/hbase-default.xml
+++ b/hbase-common/src/main/resources/hbase-default.xml
@@ -807,11 +807,6 @@ possible configurations would overwhelm and obscure the important.
   The default thread pool size if parallel-seeking feature enabled.
   
   
-hfile.block.cache.policy
-LRU
-The eviction policy for the L1 block cache (LRU or TinyLFU).
-  
-  
 hfile.block.cache.size
 0.4
 Percentage of maximum heap (-Xmx setting) to allocate to block cache

http://git-wip-us.apache.org/repos/asf/hbase/blob/b952e647/hbase-resource-bundle/src/main/resources/supplemental-models.xml
--
diff --git a/hbase-resource-bundle/src/main/resources/supplemental-models.xml b/hbase-resource-bundle/src/main/resources/supplemental-models.xml
index 11c405e..0979b5f 100644
--- a/hbase-resource-bundle/src/main/resources/supplemental-models.xml
+++ b/hbase-resource-bundle/src/main/resources/supplemental-models.xml
@@ -152,20 +152,6 @@ under the License.
   
   
 
-  com.github.ben-manes.caffeine
-  caffeine
-
-  
-
-  Apache License, Version 2.0
-  http://www.apache.org/licenses/LICENSE-2.0.txt
-  repo
-
-  
-
-  
-  
-
   com.lmax
   disruptor
 
@@ -1682,7 +1668,7 @@ Mozilla Public License Version 2.0
 means any form of the work other than Source Code Form.
 
 1.7. "Larger Work"
-means a work that combines Covered Software with other material, in
+means a work that combines Covered Software with other material, in 
 a separate file or files, that is not Covered Software.
 
 1.8. "License"

http://git-wip-us.apache.org/repos/asf/hbase/blob/b952e647/hbase-server/pom.xml
--
diff --git a/hbase-server/pom.xml b/hbase-server/pom.xml
index d036357..a431006 100644
--- a/hbase-server/pom.xml
+++ b/hbase-server/pom.xml
@@ -435,10 +435,6 @@
true
 
 
-  com.github.ben-manes.caffeine
-  caffeine
-
-
   io.dropwizard.metrics
   metrics-core
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/b952e647/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java
index 0933e82..321f72c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java
@@ -22,14 +22,13 @@ import static org.apache.hadoop.hbase.HConstants.BUCKET_CACHE_SIZE_KEY;
 
 import java.io.IOException;
 import java.lang.management.ManagementFactory;
-import java.util.concurrent.ForkJoinPool;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import 

[39/77] [abbrv] [partial] hbase git commit: HBASE-15638 Shade protobuf Which includes

2016-10-07 Thread syuanjiang
http://git-wip-us.apache.org/repos/asf/hbase/blob/95c1dc93/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/coprocessor/example/generated/BulkDeleteProtos.java
--
diff --git a/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/coprocessor/example/generated/BulkDeleteProtos.java b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/coprocessor/example/generated/BulkDeleteProtos.java
new file mode 100644
index 000..373e036
--- /dev/null
+++ b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/coprocessor/example/generated/BulkDeleteProtos.java
@@ -0,0 +1,1792 @@
+// Generated by the protocol buffer compiler.  DO NOT EDIT!
+// source: BulkDelete.proto
+
+package org.apache.hadoop.hbase.coprocessor.example.generated;
+
+public final class BulkDeleteProtos {
+  private BulkDeleteProtos() {}
+  public static void registerAllExtensions(
+  com.google.protobuf.ExtensionRegistry registry) {
+  }
+  public interface BulkDeleteRequestOrBuilder
+  extends com.google.protobuf.MessageOrBuilder {
+
+// required .hbase.pb.Scan scan = 1;
+/**
+ * required .hbase.pb.Scan scan = 1;
+ */
+boolean hasScan();
+/**
+ * required .hbase.pb.Scan scan = 1;
+ */
+org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Scan getScan();
+/**
+ * required .hbase.pb.Scan scan = 1;
+ */
+org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanOrBuilder getScanOrBuilder();
+
+// required .hbase.pb.BulkDeleteRequest.DeleteType deleteType = 2;
+/**
+ * required .hbase.pb.BulkDeleteRequest.DeleteType deleteType = 2;
+ */
+boolean hasDeleteType();
+/**
+ * required .hbase.pb.BulkDeleteRequest.DeleteType deleteType = 2;
+ */
+org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequest.DeleteType getDeleteType();
+
+// optional uint64 timestamp = 3;
+/**
+ * optional uint64 timestamp = 3;
+ */
+boolean hasTimestamp();
+/**
+ * optional uint64 timestamp = 3;
+ */
+long getTimestamp();
+
+// required uint32 rowBatchSize = 4;
+/**
+ * required uint32 rowBatchSize = 4;
+ */
+boolean hasRowBatchSize();
+/**
+ * required uint32 rowBatchSize = 4;
+ */
+int getRowBatchSize();
+  }
+  /**
+   * Protobuf type {@code hbase.pb.BulkDeleteRequest}
+   */
+  public static final class BulkDeleteRequest extends
+  com.google.protobuf.GeneratedMessage
+  implements BulkDeleteRequestOrBuilder {
+// Use BulkDeleteRequest.newBuilder() to construct.
+private BulkDeleteRequest(com.google.protobuf.GeneratedMessage.Builder builder) {
+  super(builder);
+  this.unknownFields = builder.getUnknownFields();
+}
+private BulkDeleteRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+private static final BulkDeleteRequest defaultInstance;
+public static BulkDeleteRequest getDefaultInstance() {
+  return defaultInstance;
+}
+
+public BulkDeleteRequest getDefaultInstanceForType() {
+  return defaultInstance;
+}
+
+private final com.google.protobuf.UnknownFieldSet unknownFields;
+@java.lang.Override
+public final com.google.protobuf.UnknownFieldSet
+getUnknownFields() {
+  return this.unknownFields;
+}
+private BulkDeleteRequest(
+com.google.protobuf.CodedInputStream input,
+com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+throws com.google.protobuf.InvalidProtocolBufferException {
+  initFields();
+  int mutable_bitField0_ = 0;
+  com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+  com.google.protobuf.UnknownFieldSet.newBuilder();
+  try {
+boolean done = false;
+while (!done) {
+  int tag = input.readTag();
+  switch (tag) {
+case 0:
+  done = true;
+  break;
+default: {
+  if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+done = true;
+  }
+  break;
+}
+case 10: {
+  org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Scan.Builder subBuilder = null;
+  if (((bitField0_ & 0x0001) == 0x0001)) {
+subBuilder = scan_.toBuilder();
+  }
+  scan_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Scan.PARSER, extensionRegistry);
+  if (subBuilder != null) {
+subBuilder.mergeFrom(scan_);
+scan_ = subBuilder.buildPartial();
+  }
+  bitField0_ |= 0x0001;
+  break;
+}
+case 16: {
+  int rawValue = input.readEnum();
+  

[68/77] [abbrv] hbase git commit: HBASE-16778 Move testIllegalTableDescriptor out from TestFromClientSide

2016-10-07 Thread syuanjiang
HBASE-16778 Move testIllegalTableDescriptor out from TestFromClientSide


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/eb33b60a
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/eb33b60a
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/eb33b60a

Branch: refs/heads/hbase-12439
Commit: eb33b60a954d8695f07b5ce71501760d732a85b6
Parents: b548d49
Author: Matteo Bertozzi 
Authored: Wed Oct 5 20:04:18 2016 -0700
Committer: Matteo Bertozzi 
Committed: Wed Oct 5 20:04:18 2016 -0700

--
 .../hadoop/hbase/client/TestFromClientSide.java | 148 -
 .../client/TestIllegalTableDescriptor.java  | 218 +++
 2 files changed, 218 insertions(+), 148 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/eb33b60a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
--
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
index 50a566a..6bd9ccd 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
@@ -88,7 +88,6 @@ import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
 import org.apache.hadoop.hbase.io.hfile.BlockCache;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
-import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto;
 import 
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.MutationType;
@@ -107,10 +106,8 @@ import 
org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.Pair;
-import org.apache.log4j.AppenderSkeleton;
 import org.apache.log4j.Level;
 import org.apache.log4j.Logger;
-import org.apache.log4j.spi.LoggingEvent;
 import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.Before;
@@ -5293,151 +5290,6 @@ public class TestFromClientSide {
   }
 
   @Test
-  public void testIllegalTableDescriptor() throws Exception {
-HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("testIllegalTableDescriptor"));
-HColumnDescriptor hcd = new HColumnDescriptor(FAMILY);
-
-// create table with 0 families
-checkTableIsIllegal(htd);
-htd.addFamily(hcd);
-checkTableIsLegal(htd);
-
-htd.setMaxFileSize(1024); // 1K
-checkTableIsIllegal(htd);
-htd.setMaxFileSize(0);
-checkTableIsIllegal(htd);
-htd.setMaxFileSize(1024 * 1024 * 1024); // 1G
-checkTableIsLegal(htd);
-
-htd.setMemStoreFlushSize(1024);
-checkTableIsIllegal(htd);
-htd.setMemStoreFlushSize(0);
-checkTableIsIllegal(htd);
-htd.setMemStoreFlushSize(128 * 1024 * 1024); // 128M
-checkTableIsLegal(htd);
-
-htd.setRegionSplitPolicyClassName("nonexisting.foo.class");
-checkTableIsIllegal(htd);
-htd.setRegionSplitPolicyClassName(null);
-checkTableIsLegal(htd);
-
-hcd.setBlocksize(0);
-checkTableIsIllegal(htd);
-hcd.setBlocksize(1024 * 1024 * 128); // 128M
-checkTableIsIllegal(htd);
-hcd.setBlocksize(1024);
-checkTableIsLegal(htd);
-
-hcd.setTimeToLive(0);
-checkTableIsIllegal(htd);
-hcd.setTimeToLive(-1);
-checkTableIsIllegal(htd);
-hcd.setTimeToLive(1);
-checkTableIsLegal(htd);
-
-hcd.setMinVersions(-1);
-checkTableIsIllegal(htd);
-hcd.setMinVersions(3);
-try {
-  hcd.setMaxVersions(2);
-  fail();
-} catch (IllegalArgumentException ex) {
-  // expected
-  hcd.setMaxVersions(10);
-}
-checkTableIsLegal(htd);
-
-// HBASE-13776 Setting illegal versions for HColumnDescriptor
-//  does not throw IllegalArgumentException
-// finally, minVersions must be less than or equal to maxVersions
-hcd.setMaxVersions(4);
-hcd.setMinVersions(5);
-checkTableIsIllegal(htd);
-hcd.setMinVersions(3);
-
-hcd.setScope(-1);
-checkTableIsIllegal(htd);
-hcd.setScope(0);
-checkTableIsLegal(htd);
-
-try {
-  hcd.setDFSReplication((short) -1);
-  fail("Illegal value for setDFSReplication did not throw");
-} catch (IllegalArgumentException e) {
-  // pass
-}
-// set an illegal DFS replication value by hand
-hcd.setValue(HColumnDescriptor.DFS_REPLICATION, "-1");
-checkTableIsIllegal(htd);
-try {
-  

[72/77] [abbrv] hbase git commit: HBASE-16777 Fix flaky TestMasterProcedureEvents

2016-10-07 Thread syuanjiang
HBASE-16777 Fix flaky TestMasterProcedureEvents


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/7a385097
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/7a385097
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/7a385097

Branch: refs/heads/hbase-12439
Commit: 7a38509782ba571cf6b4a5c02ed0ce693ae88ad7
Parents: d1e40bf
Author: Matteo Bertozzi 
Authored: Thu Oct 6 07:24:39 2016 -0700
Committer: Matteo Bertozzi 
Committed: Thu Oct 6 07:24:39 2016 -0700

--
 .../procedure/MasterProcedureScheduler.java |  20 
 .../procedure/TestMasterProcedureEvents.java| 102 +++
 2 files changed, 80 insertions(+), 42 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/7a385097/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
index 26ecd94..5282acc 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
@@ -586,6 +586,14 @@ public class MasterProcedureScheduler implements ProcedureRunnableSet {
   }
   return proc;
 }
+
+@VisibleForTesting
+protected synchronized int size() {
+  if (waitingProcedures != null) {
+return waitingProcedures.size();
+  }
+  return 0;
+}
   }
 
   public static class ProcedureEvent extends BaseProcedureEvent {
@@ -647,6 +655,18 @@ public class MasterProcedureScheduler implements ProcedureRunnableSet {
   return description;
 }
 
+@VisibleForTesting
+protected synchronized int size() {
+  int count = super.size();
+  if (waitingTables != null) {
+count += waitingTables.size();
+  }
+  if (waitingServers != null) {
+count += waitingServers.size();
+  }
+  return count;
+}
+
 @Override
 public String toString() {
   return String.format("%s(%s)", getClass().getSimpleName(), getDescription());

http://git-wip-us.apache.org/repos/asf/hbase/blob/7a385097/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureEvents.java
--
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureEvents.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureEvents.java
index 1ff7473..4c53845 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureEvents.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureEvents.java
@@ -44,6 +44,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.Procedu
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.util.Threads;
+import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
@@ -62,14 +64,15 @@ public class TestMasterProcedureEvents {
   private static long nonce = HConstants.NO_NONCE;
 
   private static void setupConf(Configuration conf) {
-conf.setInt(MasterProcedureConstants.MASTER_PROCEDURE_THREADS, 8);
+conf.setInt(MasterProcedureConstants.MASTER_PROCEDURE_THREADS, 1);
 conf.setBoolean(WALProcedureStore.USE_HSYNC_CONF_KEY, false);
   }
 
   @BeforeClass
   public static void setupCluster() throws Exception {
 setupConf(UTIL.getConfiguration());
-UTIL.startMiniCluster(3);
+UTIL.startMiniCluster(2);
+UTIL.waitUntilNoRegionsInTransition();
   }
 
   @AfterClass
@@ -81,47 +84,37 @@ public class TestMasterProcedureEvents {
 }
   }
 
-  @Test
+  @After
+  public void tearDown() throws Exception {
+for (HTableDescriptor htd: UTIL.getHBaseAdmin().listTables()) {
+  LOG.info("Tear down, remove table=" + htd.getTableName());
+  UTIL.deleteTable(htd.getTableName());
+}
+  }
+
+  @Test(timeout = 3)
   public void testMasterInitializedEvent() throws Exception {
 TableName tableName = TableName.valueOf("testMasterInitializedEvent");
 HMaster master = UTIL.getMiniHBaseCluster().getMaster();
 ProcedureExecutor procExec = master.getMasterProcedureExecutor();
-MasterProcedureScheduler procSched = 

[49/77] [abbrv] [partial] hbase git commit: HBASE-15638 Shade protobuf Which includes

2016-10-07 Thread syuanjiang
http://git-wip-us.apache.org/repos/asf/hbase/blob/95c1dc93/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionCoprocessorRpcChannel.java
--
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionCoprocessorRpcChannel.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionCoprocessorRpcChannel.java
new file mode 100644
index 000..2dc73e0
--- /dev/null
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionCoprocessorRpcChannel.java
@@ -0,0 +1,103 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import java.io.IOException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceRequest;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceResponse;
+
+import com.google.protobuf.Descriptors;
+import com.google.protobuf.Message;
+import com.google.protobuf.RpcController;
+
+/**
+ * Provides clients with an RPC connection to call Coprocessor Endpoint
+ * {@link com.google.protobuf.Service}s
+ * against a given table region.  An instance of this class may be obtained
+ * by calling {@link 
org.apache.hadoop.hbase.client.Table#coprocessorService(byte[])},
+ * but should normally only be used in creating a new {@link 
com.google.protobuf.Service} stub to
+ * call the endpoint methods.
+ * @see org.apache.hadoop.hbase.client.Table#coprocessorService(byte[])
+ */
+@InterfaceAudience.Private
+class RegionCoprocessorRpcChannel extends SyncCoprocessorRpcChannel {
+  private static final Log LOG = 
LogFactory.getLog(RegionCoprocessorRpcChannel.class);
+  private final TableName table;
+  private final byte [] row;
+  private final ClusterConnection conn;
+  private byte[] lastRegion;
+  private final int operationTimeout;
+  private final RpcRetryingCallerFactory rpcCallerFactory;
+
+  /**
+   * Constructor
+   * @param conn connection to use
+   * @param table to connect to
+   * @param row to locate region with
+   */
+  RegionCoprocessorRpcChannel(ClusterConnection conn, TableName table, byte[] 
row) {
+this.table = table;
+this.row = row;
+this.conn = conn;
+this.operationTimeout = 
conn.getConnectionConfiguration().getOperationTimeout();
+this.rpcCallerFactory = conn.getRpcRetryingCallerFactory();
+  }
+
+  @Override
+  protected Message callExecService(final RpcController controller,
+  final Descriptors.MethodDescriptor method, final Message request,
+  final Message responsePrototype)
+  throws IOException {
+if (LOG.isTraceEnabled()) {
+  LOG.trace("Call: " + method.getName() + ", " + request.toString());
+}
+if (row == null) {
+  throw new NullPointerException("Can't be null!");
+}
+ClientServiceCallable<CoprocessorServiceResponse> callable =
+  new ClientServiceCallable<CoprocessorServiceResponse>(this.conn,
+  this.table, this.row, this.conn.getRpcControllerFactory().newController()) {
+  @Override
+  protected CoprocessorServiceResponse rpcCall() throws Exception {
+byte [] regionName = getLocation().getRegionInfo().getRegionName();
+CoprocessorServiceRequest csr =
+CoprocessorRpcUtils.getCoprocessorServiceRequest(method, request, 
row, regionName);
+return getStub().execService(getRpcController(), csr);
+  }
+};
+CoprocessorServiceResponse result =
+this.rpcCallerFactory.<CoprocessorServiceResponse> newCaller().callWithRetries(callable,
+operationTimeout);
+this.lastRegion = result.getRegion().getValue().toByteArray();
+return CoprocessorRpcUtils.getResponse(result, responsePrototype);
+  }
+
+  /**
+   * Get last region this RpcChannel communicated with
+   * @return region name as byte array
+   */
+  public byte[] getLastRegion() {
+return lastRegion;
+  }
+}
\ No newline at end of file
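
A minimal usage sketch for the class added above, assuming a hypothetical coprocessor
endpoint service MyService (with MyRequest/MyResponse messages) generated from a .proto
file; the channel is obtained through Table#coprocessorService(byte[]) exactly as the
javadoc describes, and everything else is the standard client API:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.ConnectionFactory;
  import org.apache.hadoop.hbase.client.Table;
  import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
  import org.apache.hadoop.hbase.util.Bytes;

  Configuration conf = HBaseConfiguration.create();
  try (Connection connection = ConnectionFactory.createConnection(conf);
       Table table = connection.getTable(TableName.valueOf("mytable"))) {
    // Channel bound to the region holding "row1"; a RegionCoprocessorRpcChannel under the hood.
    CoprocessorRpcChannel channel = table.coprocessorService(Bytes.toBytes("row1"));
    // MyService is hypothetical; any protobuf-generated endpoint stub is wired up the same way.
    MyService.BlockingInterface stub = MyService.newBlockingStub(channel);
    MyResponse response = stub.myMethod(null, MyRequest.getDefaultInstance());
  }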


[64/77] [abbrv] hbase git commit: HBASE-16690 Move znode path configs to a separated class

2016-10-07 Thread syuanjiang
HBASE-16690 Move znode path configs to a separated class


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/3aa4dfa7
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/3aa4dfa7
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/3aa4dfa7

Branch: refs/heads/hbase-12439
Commit: 3aa4dfa73d56a1a6a42274e8d65dcbb694a072c7
Parents: 617dfe1
Author: zhangduo 
Authored: Fri Sep 23 23:30:43 2016 +0800
Committer: zhangduo 
Committed: Wed Oct 5 20:12:44 2016 +0800

--
 .../hbase/client/ConnectionImplementation.java  |   4 +-
 .../apache/hadoop/hbase/client/HBaseAdmin.java  |   2 +-
 .../hadoop/hbase/client/ZooKeeperRegistry.java  |   2 +-
 .../replication/ReplicationStateZKBase.java |   3 +-
 .../replication/ReplicationTrackerZKImpl.java   |   4 +-
 .../hbase/zookeeper/MasterAddressTracker.java   |  13 +-
 .../hbase/zookeeper/MetaTableLocator.java   |  11 +-
 .../hadoop/hbase/zookeeper/ZKClusterId.java |   6 +-
 .../apache/hadoop/hbase/zookeeper/ZKUtil.java   |  15 +-
 .../hadoop/hbase/zookeeper/ZNodePaths.java  | 176 +++
 .../hadoop/hbase/zookeeper/ZkAclReset.java  |   4 +-
 .../hbase/zookeeper/ZooKeeperNodeTracker.java   |  10 +-
 .../hbase/zookeeper/ZooKeeperWatcher.java   | 213 +++
 .../hbase/zookeeper/TestZooKeeperWatcher.java   |  42 ++--
 .../hbase/IntegrationTestMetaReplicas.java  |   2 +-
 .../test/IntegrationTestZKAndFSPermissions.java |   2 +-
 .../hbase/rsgroup/RSGroupInfoManagerImpl.java   |   6 +-
 .../rsgroup/VerifyingRSGroupAdminClient.java|   2 +-
 .../hbase/tmpl/master/MasterStatusTmpl.jamon|   2 +-
 .../apache/hadoop/hbase/ZKNamespaceManager.java |   2 +-
 .../backup/example/ZKTableArchiveClient.java|   4 +-
 .../ZKSplitLogManagerCoordination.java  |  41 ++--
 .../ZkSplitLogWorkerCoordination.java   |  26 +--
 .../hbase/master/ActiveMasterManager.java   |  18 +-
 .../org/apache/hadoop/hbase/master/HMaster.java |   8 +-
 .../hbase/master/MasterMetaBootstrap.java   |   9 +-
 .../hadoop/hbase/master/ServerManager.java  |   2 +-
 .../hadoop/hbase/master/TableLockManager.java   |  12 +-
 .../hadoop/hbase/mob/mapreduce/SweepJob.java|   3 +-
 .../hadoop/hbase/procedure/ZKProcedureUtil.java |   2 +-
 .../hbase/regionserver/HRegionServer.java   |   6 +-
 .../replication/HBaseReplicationEndpoint.java   |   4 +-
 .../security/access/ZKPermissionWatcher.java|   8 +-
 .../hbase/security/token/ZKSecretWatcher.java   |   2 +-
 .../visibility/ZKVisibilityLabelWatcher.java|   4 +-
 .../org/apache/hadoop/hbase/util/HBaseFsck.java |   4 +-
 .../hadoop/hbase/util/ZKDataMigrator.java   |   4 +-
 .../hbase/util/hbck/ReplicationChecker.java |   2 +-
 .../hbase/zookeeper/ClusterStatusTracker.java   |  10 +-
 .../hbase/zookeeper/DrainingServerTracker.java  |   8 +-
 .../hbase/zookeeper/LoadBalancerTracker.java|   8 +-
 .../zookeeper/MasterMaintenanceModeTracker.java |   4 +-
 .../zookeeper/RecoveringRegionWatcher.java  |   4 +-
 .../zookeeper/RegionNormalizerTracker.java  |   8 +-
 .../hbase/zookeeper/RegionServerTracker.java|  10 +-
 .../hbase/zookeeper/SplitOrMergeTracker.java|   8 +-
 .../hadoop/hbase/zookeeper/ZKSplitLog.java  |  14 +-
 .../hbase/client/TestMetaWithReplicas.java  |   4 +-
 .../hbase/master/TestActiveMasterManager.java   |  14 +-
 .../master/TestDistributedLogSplitting.java |  16 +-
 .../hbase/master/TestHMasterRPCException.java   |   6 +-
 .../hbase/master/TestMasterNoCluster.java   |   2 +-
 .../hbase/master/TestMasterStatusServlet.java   |   2 +
 .../hbase/master/TestMasterWalManager.java  |   9 +-
 .../hbase/master/TestMetaShutdownHandler.java   |   2 +-
 .../hbase/master/TestSplitLogManager.java   |  18 +-
 .../hbase/master/TestTableLockManager.java  |   5 +-
 .../hbase/master/TestTableStateManager.java |   2 +-
 .../hbase/mob/mapreduce/TestMobSweepMapper.java |   2 +-
 .../mob/mapreduce/TestMobSweepReducer.java  |   2 +-
 .../regionserver/TestMasterAddressTracker.java  |   6 +-
 .../regionserver/TestRegionServerHostname.java  |   2 +-
 .../hbase/regionserver/TestSplitLogWorker.java  |  26 +--
 .../TestReplicationStateHBaseImpl.java  |   2 +-
 .../replication/TestReplicationStateZKImpl.java |   4 +-
 .../TestReplicationTrackerZKImpl.java   |  19 +-
 .../hadoop/hbase/zookeeper/TestZKMulti.java |  40 ++--
 .../zookeeper/TestZooKeeperNodeTracker.java |   6 +-
 .../lock/TestZKInterProcessReadWriteLock.java   |   6 +-
 69 files changed, 491 insertions(+), 458 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/3aa4dfa7/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java

[58/77] [abbrv] hbase git commit: HBASE-16764 hbase-protocol-shaded generate-shaded-classes profile unpacks shaded java files into wrong location

2016-10-07 Thread syuanjiang
HBASE-16764 hbase-protocol-shaded generate-shaded-classes profile unpacks 
shaded java files into wrong location


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/89eb71f1
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/89eb71f1
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/89eb71f1

Branch: refs/heads/hbase-12439
Commit: 89eb71f1b6c632fa9156e3cf27f1980962faf171
Parents: 34ad965
Author: stack 
Authored: Tue Oct 4 11:51:47 2016 -0700
Committer: stack 
Committed: Tue Oct 4 11:56:16 2016 -0700

--
 hbase-protocol-shaded/pom.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/89eb71f1/hbase-protocol-shaded/pom.xml
--
diff --git a/hbase-protocol-shaded/pom.xml b/hbase-protocol-shaded/pom.xml
index e0e9eec..071b8df 100644
--- a/hbase-protocol-shaded/pom.xml
+++ b/hbase-protocol-shaded/pom.xml
@@ -329,7 +329,7 @@
   sources
   jar
   true
-  ${default.sources.dir}
+  
${basedir}/src/main/java
   **/*.java
 
   



[61/77] [abbrv] hbase git commit: HBASE-16760 Deprecate ByteString related methods in Bytes.java.

2016-10-07 Thread syuanjiang
HBASE-16760 Deprecate ByteString related methods in Bytes.java.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/6a9b57b3
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/6a9b57b3
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/6a9b57b3

Branch: refs/heads/hbase-12439
Commit: 6a9b57b39520780441a236df3bb8d4ef208c4edb
Parents: 5ae516b
Author: anoopsamjohn 
Authored: Wed Oct 5 11:19:37 2016 +0530
Committer: anoopsamjohn 
Committed: Wed Oct 5 11:19:37 2016 +0530

--
 .../main/java/org/apache/hadoop/hbase/util/Bytes.java | 14 ++
 1 file changed, 6 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/6a9b57b3/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Bytes.java
--
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Bytes.java 
b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Bytes.java
index 2d7d3f6..626132b 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Bytes.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Bytes.java
@@ -28,7 +28,6 @@ import java.io.UnsupportedEncodingException;
 import java.math.BigDecimal;
 import java.math.BigInteger;
 import java.nio.ByteBuffer;
-import java.nio.charset.Charset;
 import java.nio.charset.StandardCharsets;
 import java.security.SecureRandom;
 import java.util.Arrays;
@@ -66,13 +65,6 @@ import com.google.protobuf.ByteString;
 value="EQ_CHECK_FOR_OPERAND_NOT_COMPATIBLE_WITH_THIS",
 justification="It has been like this forever")
public class Bytes implements Comparable<Bytes> {
-  //HConstants.UTF8_ENCODING should be updated if this changed
-  /** When we encode strings, we always specify UTF8 encoding */
-  private static final String UTF8_ENCODING = "UTF-8";
-
-  //HConstants.UTF8_CHARSET should be updated if this changed
-  /** When we encode strings, we always specify UTF8 encoding */
-  private static final Charset UTF8_CHARSET = Charset.forName(UTF8_ENCODING);
 
   // Using the charset canonical name for String/byte[] conversions is much
   // more efficient due to use of cached encoders/decoders.
@@ -193,7 +185,9 @@ public class Bytes implements Comparable<Bytes> {
   /**
* Copy bytes from ByteString instance.
* @param byteString copy from
+   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0.
*/
+  @Deprecated
   public Bytes(final ByteString byteString) {
 this(byteString.toByteArray());
   }
@@ -259,6 +253,10 @@ public class Bytes implements Comparable {
 return this.offset;
   }
 
+  /**
+   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0.
+   */
+  @Deprecated
   public ByteString toByteString() {
 return ByteString.copyFrom(this.bytes, this.offset, this.length);
   }
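
For code that still uses the deprecated conversions, the replacement is a one-liner in
each direction; a sketch assuming a plain com.google.protobuf ByteString value and the
usual get()/getOffset()/getLength() accessors on this Bytes class:

  import com.google.protobuf.ByteString;
  import org.apache.hadoop.hbase.util.Bytes;

  ByteString byteString = ByteString.copyFromUtf8("value");

  // Instead of: new Bytes(byteString)
  Bytes wrapped = new Bytes(byteString.toByteArray());

  // Instead of: wrapped.toByteString()
  ByteString copy = ByteString.copyFrom(wrapped.get(), wrapped.getOffset(), wrapped.getLength());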



[75/77] [abbrv] hbase git commit: HBASE-16768 Inconsistent results from the Append/Increment (ChiaPing Tsai)

2016-10-07 Thread syuanjiang
HBASE-16768 Inconsistent results from the Append/Increment (ChiaPing Tsai)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/96d34f2a
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/96d34f2a
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/96d34f2a

Branch: refs/heads/hbase-12439
Commit: 96d34f2a79bf977d83f0b9814b253669e6c6e671
Parents: 2c7211e
Author: tedyu 
Authored: Fri Oct 7 00:59:27 2016 -0700
Committer: tedyu 
Committed: Fri Oct 7 00:59:27 2016 -0700

--
 .../hbase/regionserver/AbstractMemStore.java|  9 +++
 .../hadoop/hbase/regionserver/HRegion.java  | 51 
 .../hadoop/hbase/regionserver/HStore.java   | 10 
 .../hadoop/hbase/regionserver/MemStore.java |  7 +++
 .../apache/hadoop/hbase/regionserver/Store.java |  7 +++
 .../hadoop/hbase/client/TestFromClientSide.java | 61 
 6 files changed, 121 insertions(+), 24 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/96d34f2a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMemStore.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMemStore.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMemStore.java
index aa6576f..5544251 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMemStore.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMemStore.java
@@ -97,6 +97,15 @@ public abstract class AbstractMemStore implements MemStore {
*/
   public abstract void updateLowestUnflushedSequenceIdInWAL(boolean 
onlyIfMoreRecent);
 
+  @Override
+  public long add(Iterable<Cell> cells) {
+long size = 0;
+for (Cell cell : cells) {
+  size += add(cell);
+}
+return size;
+  }
+  
   /**
* Write an update
* @param cell the cell to be added

http://git-wip-us.apache.org/repos/asf/hbase/blob/96d34f2a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 757ddab..d1684a3 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -3256,8 +3256,15 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
 if (batchOp.retCodeDetails[i].getOperationStatusCode() != 
OperationStatusCode.NOT_RUN) {
   continue;
 }
-addedSize += applyFamilyMapToMemstore(familyMaps[i], replay,
+// We need to update the sequence id for following reasons.
+// 1) If the op is in replay mode, FSWALEntry#stampRegionSequenceId 
won't stamp sequence id.
+// 2) If no WAL, FSWALEntry won't be used
+boolean updateSeqId = replay || batchOp.getMutation(i).getDurability() 
== Durability.SKIP_WAL;
+if (updateSeqId) {
+  this.updateSequenceId(familyMaps[i].values(),
 replay? batchOp.getReplaySequenceId(): 
writeEntry.getWriteNumber());
+}
+addedSize += applyFamilyMapToMemstore(familyMaps[i]);
   }
 
   // STEP 6. Complete mvcc.
@@ -3673,6 +3680,16 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
 }
   }
 
+  private void updateSequenceId(final Iterable<List<Cell>> cellItr, final long sequenceId)
+  throws IOException {
+for (List<Cell> cells: cellItr) {
+  if (cells == null) return;
+  for (Cell cell : cells) {
+CellUtil.setSequenceId(cell, sequenceId);
+  }
+}
+  }
+
   @Override
public void updateCellTimestamps(final Iterable<List<Cell>> cellItr, final byte[] now)
   throws IOException {
@@ -3783,15 +3800,14 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
* @param familyMap Map of Cells by family
* @return the additional memory usage of the memstore caused by the new 
entries.
*/
-  private long applyFamilyMapToMemstore(Map<byte[], List<Cell>> familyMap, boolean replay,
-  long sequenceId)
+  private long applyFamilyMapToMemstore(Map<byte[], List<Cell>> familyMap)
   throws IOException {
 long size = 0;
for (Map.Entry<byte[], List<Cell>> e : familyMap.entrySet()) {
   byte[] family = e.getKey();
List<Cell> cells = e.getValue();
   assert cells instanceof RandomAccess;
-  size += applyToMemstore(getStore(family), cells, false, replay, 
sequenceId);
+  size += 

[16/77] [abbrv] [partial] hbase git commit: HBASE-15638 Shade protobuf Which includes

2016-10-07 Thread syuanjiang
http://git-wip-us.apache.org/repos/asf/hbase/blob/95c1dc93/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/Field.java
--
diff --git 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/Field.java
 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/Field.java
new file mode 100644
index 000..15951b3
--- /dev/null
+++ 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/Field.java
@@ -0,0 +1,2450 @@
+// Generated by the protocol buffer compiler.  DO NOT EDIT!
+// source: google/protobuf/type.proto
+
+package org.apache.hadoop.hbase.shaded.com.google.protobuf;
+
+/**
+ * 
+ * A single field of a message type.
+ * 
+ *
+ * Protobuf type {@code google.protobuf.Field}
+ */
+public  final class Field extends
+org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 
implements
+// @@protoc_insertion_point(message_implements:google.protobuf.Field)
+FieldOrBuilder {
+  // Use Field.newBuilder() to construct.
+  private 
Field(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder
 builder) {
+super(builder);
+  }
+  private Field() {
+kind_ = 0;
+cardinality_ = 0;
+number_ = 0;
+name_ = "";
+typeUrl_ = "";
+oneofIndex_ = 0;
+packed_ = false;
+options_ = java.util.Collections.emptyList();
+jsonName_ = "";
+defaultValue_ = "";
+  }
+
+  @java.lang.Override
+  public final 
org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet
+  getUnknownFields() {
+return 
org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.getDefaultInstance();
+  }
+  private Field(
+  org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream 
input,
+  org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite 
extensionRegistry)
+  throws 
org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException
 {
+this();
+int mutable_bitField0_ = 0;
+try {
+  boolean done = false;
+  while (!done) {
+int tag = input.readTag();
+switch (tag) {
+  case 0:
+done = true;
+break;
+  default: {
+if (!input.skipField(tag)) {
+  done = true;
+}
+break;
+  }
+  case 8: {
+int rawValue = input.readEnum();
+
+kind_ = rawValue;
+break;
+  }
+  case 16: {
+int rawValue = input.readEnum();
+
+cardinality_ = rawValue;
+break;
+  }
+  case 24: {
+
+number_ = input.readInt32();
+break;
+  }
+  case 34: {
+java.lang.String s = input.readStringRequireUtf8();
+
+name_ = s;
+break;
+  }
+  case 50: {
+java.lang.String s = input.readStringRequireUtf8();
+
+typeUrl_ = s;
+break;
+  }
+  case 56: {
+
+oneofIndex_ = input.readInt32();
+break;
+  }
+  case 64: {
+
+packed_ = input.readBool();
+break;
+  }
+  case 74: {
+if (!((mutable_bitField0_ & 0x0080) == 0x0080)) {
+  options_ = new java.util.ArrayList<org.apache.hadoop.hbase.shaded.com.google.protobuf.Option>();
+  mutable_bitField0_ |= 0x0080;
+}
+options_.add(
+
input.readMessage(org.apache.hadoop.hbase.shaded.com.google.protobuf.Option.parser(),
 extensionRegistry));
+break;
+  }
+  case 82: {
+java.lang.String s = input.readStringRequireUtf8();
+
+jsonName_ = s;
+break;
+  }
+  case 90: {
+java.lang.String s = input.readStringRequireUtf8();
+
+defaultValue_ = s;
+break;
+  }
+}
+  }
+} catch 
(org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException
 e) {
+  throw e.setUnfinishedMessage(this);
+} catch (java.io.IOException e) {
+  throw new 
org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException(
+  e).setUnfinishedMessage(this);
+} finally {
+  if (((mutable_bitField0_ & 0x0080) == 0x0080)) {
+options_ = java.util.Collections.unmodifiableList(options_);
+  }
+  makeExtensionsImmutable();
+}
+  }
+  public static final 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+  getDescriptor() {
+return 
org.apache.hadoop.hbase.shaded.com.google.protobuf.TypeProto.internal_static_google_protobuf_Field_descriptor;
+  }
+
+  protected 
org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+  internalGetFieldAccessorTable() {
+

[60/77] [abbrv] hbase git commit: HBASE-16644 Errors when reading legit HFile Trailer of old (v2.0) format file

2016-10-07 Thread syuanjiang
HBASE-16644 Errors when reading legit HFile Trailer of old (v2.0) format file


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/5ae516bd
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/5ae516bd
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/5ae516bd

Branch: refs/heads/hbase-12439
Commit: 5ae516bd632afd8de6cf113235365877525c1243
Parents: 0daeb63
Author: Mikhail Antonov 
Authored: Tue Oct 4 21:10:42 2016 -0700
Committer: Mikhail Antonov 
Committed: Tue Oct 4 21:10:42 2016 -0700

--
 .../hadoop/hbase/io/hfile/HFileBlock.java   | 24 
 1 file changed, 14 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/5ae516bd/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
index 9d2ccb2..13b501a 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
@@ -371,15 +371,16 @@ public class HFileBlock implements Cacheable {
 final int uncompressedSizeWithoutHeader =
 buf.getInt(Header.UNCOMPRESSED_SIZE_WITHOUT_HEADER_INDEX);
 final long prevBlockOffset = buf.getLong(Header.PREV_BLOCK_OFFSET_INDEX);
-byte checksumType = buf.get(Header.CHECKSUM_TYPE_INDEX);
-int bytesPerChecksum = buf.getInt(Header.BYTES_PER_CHECKSUM_INDEX);
-int onDiskDataSizeWithHeader = 
buf.getInt(Header.ON_DISK_DATA_SIZE_WITH_HEADER_INDEX);
 // This constructor is called when we deserialize a block from cache and 
when we read a block in
 // from the fs. fileCache is null when deserialized from cache so need to 
make up one.
 HFileContextBuilder fileContextBuilder = fileContext != null?
 new HFileContextBuilder(fileContext): new HFileContextBuilder();
 fileContextBuilder.withHBaseCheckSum(usesHBaseChecksum);
+int onDiskDataSizeWithHeader;
 if (usesHBaseChecksum) {
+  byte checksumType = buf.get(Header.CHECKSUM_TYPE_INDEX);
+  int bytesPerChecksum = buf.getInt(Header.BYTES_PER_CHECKSUM_INDEX);
+  onDiskDataSizeWithHeader = 
buf.getInt(Header.ON_DISK_DATA_SIZE_WITH_HEADER_INDEX);
   // Use the checksum type and bytes per checksum from header, not from 
filecontext.
   
fileContextBuilder.withChecksumType(ChecksumType.codeToType(checksumType));
   fileContextBuilder.withBytesPerCheckSum(bytesPerChecksum);
@@ -419,11 +420,12 @@ public class HFileBlock implements Cacheable {
   /**
* Parse total ondisk size including header and checksum.
* @param headerBuf Header ByteBuffer. Presumed exact size of header.
+   * @param verifyChecksum true if checksum verification is in use.
* @return Size of the block with header included.
*/
-  private static int getOnDiskSizeWithHeader(final ByteBuffer headerBuf) {
-// Set hbase checksum to true always calling headerSize.
-return headerBuf.getInt(Header.ON_DISK_SIZE_WITHOUT_HEADER_INDEX) + 
headerSize(true);
+  private static int getOnDiskSizeWithHeader(final ByteBuffer headerBuf, 
boolean verifyChecksum) {
+return headerBuf.getInt(Header.ON_DISK_SIZE_WITHOUT_HEADER_INDEX) +
+  headerSize(verifyChecksum);
   }
 
   /**
@@ -1659,10 +1661,10 @@ public class HFileBlock implements Cacheable {
  * @throws IOException
  */
 private void verifyOnDiskSizeMatchesHeader(final int passedIn, final 
ByteBuffer headerBuf,
-final long offset)
+final long offset, boolean verifyChecksum)
 throws IOException {
   // Assert size provided aligns with what is in the header
-  int fromHeader = getOnDiskSizeWithHeader(headerBuf);
+  int fromHeader = getOnDiskSizeWithHeader(headerBuf, verifyChecksum);
   if (passedIn != fromHeader) {
 throw new IOException("Passed in onDiskSizeWithHeader=" + passedIn + " 
!= " + fromHeader +
 ", offset=" + offset + ", fileContext=" + this.fileContext);
@@ -1703,7 +1705,8 @@ public class HFileBlock implements Cacheable {
   readAtOffset(is, headerBuf.array(), headerBuf.arrayOffset(), 
hdrSize, false,
   offset, pread);
 }
-onDiskSizeWithHeader = getOnDiskSizeWithHeader(headerBuf);
+onDiskSizeWithHeader = getOnDiskSizeWithHeader(headerBuf,
+  this.fileContext.isUseHBaseChecksum());
   }
   int preReadHeaderSize = headerBuf == null? 0 : hdrSize;
   // Allocate enough space to fit the next block's header too; saves a 
seek next time through.
@@ -1722,7 +1725,8 @@ public class 

[01/77] [abbrv] [partial] hbase git commit: HBASE-15638 Shade protobuf Which includes

2016-10-07 Thread syuanjiang
Repository: hbase
Updated Branches:
  refs/heads/hbase-12439 b2eac0da3 -> bc9a97245


http://git-wip-us.apache.org/repos/asf/hbase/blob/95c1dc93/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/TextFormat.java
--
diff --git 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/TextFormat.java
 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/TextFormat.java
new file mode 100644
index 000..491089b
--- /dev/null
+++ 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/TextFormat.java
@@ -0,0 +1,2108 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package org.apache.hadoop.hbase.shaded.com.google.protobuf;
+
+import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor;
+import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.EnumDescriptor;
+import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.EnumValueDescriptor;
+import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor;
+
+import java.io.IOException;
+import java.math.BigInteger;
+import java.nio.CharBuffer;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Locale;
+import java.util.Map;
+import java.util.logging.Logger;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+/**
+ * Provide text parsing and formatting support for proto2 instances.
+ * The implementation largely follows google/protobuf/text_format.cc.
+ *
+ * @author wen...@google.com Wenbo Zhu
+ * @author ken...@google.com Kenton Varda
+ */
+public final class TextFormat {
+  private TextFormat() {}
+
+  private static final Logger logger =
+  Logger.getLogger(TextFormat.class.getName());
+
+  private static final Printer DEFAULT_PRINTER = new Printer();
+  private static final Printer SINGLE_LINE_PRINTER =
+  (new Printer()).setSingleLineMode(true);
+  private static final Printer UNICODE_PRINTER =
+  (new Printer()).setEscapeNonAscii(false);
+
+  /**
+   * Outputs a textual representation of the Protocol Message supplied into
+   * the parameter output. (This representation is the new version of the
+   * classic "ProtocolPrinter" output from the original Protocol Buffer system)
+   */
+  public static void print(
+  final MessageOrBuilder message, final Appendable output)
+  throws IOException {
+DEFAULT_PRINTER.print(message, new TextGenerator(output));
+  }
+
+  /** Outputs a textual representation of {@code fields} to {@code output}. */
+  public static void print(final UnknownFieldSet fields,
+   final Appendable output)
+   throws IOException {
+DEFAULT_PRINTER.printUnknownFields(fields, new TextGenerator(output));
+  }
+
+  /**
+   * Same as {@code print()}, except that non-ASCII characters are not
+   * escaped.
+   */
+  public static void printUnicode(
+  final MessageOrBuilder message, final Appendable output)
+  throws IOException {
+UNICODE_PRINTER.print(message, new TextGenerator(output));
+  }
+
+  /**
+   * Same as {@code print()}, except that non-ASCII characters are not
+   * escaped.
+   */
+  public static void printUnicode(final 

[28/77] [abbrv] [partial] hbase git commit: HBASE-15638 Shade protobuf Which includes

2016-10-07 Thread syuanjiang
http://git-wip-us.apache.org/repos/asf/hbase/blob/95c1dc93/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
index c2838ba..0572dcf 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
@@ -50,7 +50,7 @@ import 
org.apache.hadoop.hbase.procedure2.store.ProcedureStore.ProcedureIterator
 import org.apache.hadoop.hbase.procedure2.util.StringUtils;
 import org.apache.hadoop.hbase.procedure2.util.TimeoutBlockingQueue;
 import 
org.apache.hadoop.hbase.procedure2.util.TimeoutBlockingQueue.TimeoutRetriever;
-import 
org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureState;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.ProcedureState;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.NonceKey;
@@ -593,7 +593,7 @@ public class ProcedureExecutor {
List<ProcedureInfo> procedureLists = new ArrayList<ProcedureInfo>(procedures.size() + completed.size());
for (java.util.Map.Entry<Long, Procedure> p: procedures.entrySet()) {
-  procedureLists.add(Procedure.createProcedureInfo(p.getValue(), null));
+  procedureLists.add(ProcedureUtil.createProcedureInfo(p.getValue()));
 }
for (java.util.Map.Entry<Long, ProcedureInfo> e: completed.entrySet()) {
   // Note: The procedure could show up twice in the list with different 
state, as
@@ -1349,7 +1349,7 @@ public class ProcedureExecutor {
 execCompletionCleanup(proc);
 
 // update the executor internal state maps
-ProcedureInfo procInfo = Procedure.createProcedureInfo(proc, 
proc.getNonceKey());
+ProcedureInfo procInfo = ProcedureUtil.createProcedureInfo(proc, 
proc.getNonceKey());
 if (!proc.shouldWaitClientAck(getEnvironment())) {
   procInfo.setClientAckTime(0);
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/95c1dc93/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureUtil.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureUtil.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureUtil.java
new file mode 100644
index 000..05e8e09
--- /dev/null
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureUtil.java
@@ -0,0 +1,108 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.procedure2;
+
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.ProcedureInfo;
+import org.apache.hadoop.hbase.ProcedureState;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos;
+import org.apache.hadoop.hbase.util.ForeignExceptionUtil;
+import org.apache.hadoop.hbase.util.NonceKey;
+
+/**
+ * Helper to convert to/from ProcedureProtos
+ */
+@InterfaceAudience.Private
+public final class ProcedureUtil {
+
+  private ProcedureUtil() { }
+
+  /**
+   * @return Convert the current {@link ProcedureInfo} into a Protocol Buffers 
Procedure
+   * instance.
+   */
+  public static ProcedureProtos.Procedure convertToProcedureProto(final 
ProcedureInfo procInfo) {
+final ProcedureProtos.Procedure.Builder builder = 
ProcedureProtos.Procedure.newBuilder();
+
+builder.setClassName(procInfo.getProcName());
+builder.setProcId(procInfo.getProcId());
+builder.setStartTime(procInfo.getStartTime());
+
builder.setState(ProcedureProtos.ProcedureState.valueOf(procInfo.getProcState().name()));
+builder.setLastUpdate(procInfo.getLastUpdate());
+
+if (procInfo.hasParentId()) {
+  

[67/77] [abbrv] hbase git commit: HBASE-16776 Remove duplicated versions of countRow() in tests

2016-10-07 Thread syuanjiang
HBASE-16776 Remove duplicated versions of countRow() in tests


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/b548d497
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/b548d497
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/b548d497

Branch: refs/heads/hbase-12439
Commit: b548d4978b0bdbfc6bd4e68c3d13e00f8ea4002e
Parents: 06758bf
Author: Matteo Bertozzi 
Authored: Wed Oct 5 19:45:50 2016 -0700
Committer: Matteo Bertozzi 
Committed: Wed Oct 5 19:45:50 2016 -0700

--
 .../hadoop/hbase/HBaseTestingUtility.java   | 34 
 .../hadoop/hbase/client/TestFromClientSide.java | 37 +
 .../procedure/TestServerCrashProcedure.java | 12 +-
 .../regionserver/TestMobStoreCompaction.java| 39 --
 .../TestRegionMergeTransaction.java | 37 +
 .../regionserver/TestSplitTransaction.java  | 42 ++--
 .../hadoop/hbase/util/BaseTestHBaseFsck.java| 18 ++---
 7 files changed, 72 insertions(+), 147 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/b548d497/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
index cc384de..c74c399 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
@@ -2219,13 +2219,7 @@ public class HBaseTestingUtility extends 
HBaseCommonTestingUtility {
 for (byte[] family: families) {
   scan.addFamily(family);
 }
-ResultScanner results = table.getScanner(scan);
-int count = 0;
-for (@SuppressWarnings("unused") Result res : results) {
-  count++;
-}
-results.close();
-return count;
+return countRows(table, scan);
   }
 
   /**
@@ -2240,6 +2234,32 @@ public class HBaseTestingUtility extends 
HBaseCommonTestingUtility {
 }
   }
 
+  public int countRows(final Region region) throws IOException {
+return countRows(region, new Scan());
+  }
+
+  public int countRows(final Region region, final Scan scan) throws 
IOException {
+InternalScanner scanner = region.getScanner(scan);
+try {
+  return countRows(scanner);
+} finally {
+  scanner.close();
+}
+  }
+
+  public int countRows(final InternalScanner scanner) throws IOException {
+// Do not retrieve the mob data when scanning
+int scannedCount = 0;
+List<Cell> results = new ArrayList<Cell>();
+boolean hasMore = true;
+while (hasMore) {
+  hasMore = scanner.next(results);
+  scannedCount += results.size();
+  results.clear();
+}
+return scannedCount;
+  }
+
   /**
* Return an md5 digest of the entire contents of a table.
*/

http://git-wip-us.apache.org/repos/asf/hbase/blob/b548d497/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
index f10cce3a..50a566a 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
@@ -504,7 +504,7 @@ public class TestFromClientSide {
 byte [] endKey = regions.get(0).getRegionInfo().getEndKey();
 // Count rows with a filter that stops us before passed 'endKey'.
 // Should be count of rows in first region.
-int endKeyCount = countRows(t, createScanWithRowFilter(endKey));
+int endKeyCount = TEST_UTIL.countRows(t, createScanWithRowFilter(endKey));
 assertTrue(endKeyCount < rowCount);
 
 // How do I know I did not got to second region?  Thats tough.  Can't 
really
@@ -516,29 +516,29 @@ public class TestFromClientSide {
 // New test.  Make it so scan goes into next region by one and then two.
 // Make sure count comes out right.
 byte [] key = new byte [] {endKey[0], endKey[1], (byte)(endKey[2] + 1)};
-int plusOneCount = countRows(t, createScanWithRowFilter(key));
+int plusOneCount = TEST_UTIL.countRows(t, createScanWithRowFilter(key));
 assertEquals(endKeyCount + 1, plusOneCount);
 key = new byte [] {endKey[0], endKey[1], (byte)(endKey[2] + 2)};
-int plusTwoCount = countRows(t, createScanWithRowFilter(key));
+int plusTwoCount = TEST_UTIL.countRows(t, createScanWithRowFilter(key));
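
Taken together, tests now funnel all row counting through HBaseTestingUtility; a minimal
sketch of the overloads used or added in this patch (TEST_UTIL, table, region and FAMILY
stand in for whatever the test class already defines):

  // Table + Scan, as used in TestFromClientSide above.
  int viaTable = TEST_UTIL.countRows(table, new Scan());
  // Region overloads added by this patch; the second one opens its own InternalScanner.
  int viaRegion = TEST_UTIL.countRows(region);
  int viaRegionScan = TEST_UTIL.countRows(region, new Scan().addFamily(FAMILY));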
 

[03/77] [abbrv] [partial] hbase git commit: HBASE-15638 Shade protobuf Which includes

2016-10-07 Thread syuanjiang
http://git-wip-us.apache.org/repos/asf/hbase/blob/95c1dc93/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/RpcController.java
--
diff --git 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/RpcController.java
 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/RpcController.java
new file mode 100644
index 000..99e306b
--- /dev/null
+++ 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/RpcController.java
@@ -0,0 +1,118 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package org.apache.hadoop.hbase.shaded.com.google.protobuf;
+
+/**
+ * An {@code RpcController} mediates a single method call.  The primary
+ * purpose of the controller is to provide a way to manipulate settings
+ * specific to the RPC implementation and to find out about RPC-level errors.
+ *
+ * Starting with version 2.3.0, RPC implementations should not try to build
+ * on this, but should instead provide code generator plugins which generate
+ * code specific to the particular RPC implementation.  This way the generated
+ * code can be more appropriate for the implementation in use and can avoid
+ * unnecessary layers of indirection.
+ *
+ * The methods provided by the {@code RpcController} interface are intended
+ * to be a "least common denominator" set of features which we expect all
+ * implementations to support.  Specific implementations may provide more
+ * advanced features (e.g. deadline propagation).
+ *
+ * @author ken...@google.com Kenton Varda
+ */
+public interface RpcController {
+  // -
+  // These calls may be made from the client side only.  Their results
+  // are undefined on the server side (may throw RuntimeExceptions).
+
+  /**
+   * Resets the RpcController to its initial state so that it may be reused in
+   * a new call.  This can be called from the client side only.  It must not
+   * be called while an RPC is in progress.
+   */
+  void reset();
+
+  /**
+   * After a call has finished, returns true if the call failed.  The possible
+   * reasons for failure depend on the RPC implementation.  {@code failed()}
+   * must only be called on the client side, and must not be called before a
+   * call has finished.
+   */
+  boolean failed();
+
+  /**
+   * If {@code failed()} is {@code true}, returns a human-readable description
+   * of the error.
+   */
+  String errorText();
+
+  /**
+   * Advises the RPC system that the caller desires that the RPC call be
+   * canceled.  The RPC system may cancel it immediately, may wait awhile and
+   * then cancel it, or may not even cancel the call at all.  If the call is
+   * canceled, the "done" callback will still be called and the RpcController
+   * will indicate that the call failed at that time.
+   */
+  void startCancel();
+
+  // -
+  // These calls may be made from the server side only.  Their results
+  // are undefined on the client side (may throw RuntimeExceptions).
+
+  /**
+   * Causes {@code failed()} to return true on the client side.  {@code reason}
+   * will be incorporated into the 

[74/77] [abbrv] hbase git commit: HBASE-16681: Flaky TestReplicationSourceManagerZkImpl

2016-10-07 Thread syuanjiang
HBASE-16681: Flaky TestReplicationSourceManagerZkImpl


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/2c7211ec
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/2c7211ec
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/2c7211ec

Branch: refs/heads/hbase-12439
Commit: 2c7211ec4bd6d83e498ddc82e60d70f411140ee0
Parents: 97c1333
Author: Ashu Pachauri 
Authored: Fri Sep 23 16:04:08 2016 -0700
Committer: Apekshit Sharma 
Committed: Thu Oct 6 16:26:38 2016 -0700

--
 .../TestReplicationSourceManager.java   | 59 +++-
 1 file changed, 46 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/2c7211ec/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java
index 7174d5f..c074048 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.replication.regionserver;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotEquals;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
@@ -56,7 +57,9 @@ import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.Stoppable;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.Waiter;
 import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations;
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
@@ -64,11 +67,13 @@ import 
org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.BulkLoadDescr
 import org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl;
 import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener;
 import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
+import org.apache.hadoop.hbase.replication.ReplicationEndpoint;
 import org.apache.hadoop.hbase.replication.ReplicationFactory;
 import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
 import org.apache.hadoop.hbase.replication.ReplicationPeers;
 import org.apache.hadoop.hbase.replication.ReplicationQueues;
 import org.apache.hadoop.hbase.replication.ReplicationQueuesArguments;
+import org.apache.hadoop.hbase.replication.ReplicationSourceDummy;
 import org.apache.hadoop.hbase.replication.ReplicationStateZKBase;
 import 
org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager.NodeFailoverWorker;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
@@ -437,28 +442,45 @@ public abstract class TestReplicationSourceManager {
 String replicationSourceImplName = 
conf.get("replication.replicationsource.implementation");
 try {
   DummyServer server = new DummyServer();
-  ReplicationQueues rq =
+  final ReplicationQueues rq =
   ReplicationFactory.getReplicationQueues(new 
ReplicationQueuesArguments(
   server.getConfiguration(), server, server.getZooKeeper()));
   rq.init(server.getServerName().toString());
   // Purposely fail ReplicationSourceManager.addSource() by causing 
ReplicationSourceInterface
   // initialization to throw an exception.
-  conf.set("replication.replicationsource.implementation", 
"fakeReplicationSourceImpl");
-  ReplicationPeers rp = manager.getReplicationPeers();
+  conf.set("replication.replicationsource.implementation",
+  FailInitializeDummyReplicationSource.class.getName());
+  final ReplicationPeers rp = manager.getReplicationPeers();
   // Set up the znode and ReplicationPeer for the fake peer
   rp.registerPeer("FakePeer", new 
ReplicationPeerConfig().setClusterKey("localhost:1:/hbase"));
-  rp.peerConnected("FakePeer");
-  // Have ReplicationSourceManager add the fake peer. It should fail to 
initialize a
-  // ReplicationSourceInterface.
-  List<String> fakePeers = new ArrayList<>();
-  fakePeers.add("FakePeer");
-  manager.peerListChanged(fakePeers);
+  // Wait for the peer to get created and connected
+  

[77/77] [abbrv] hbase git commit: HBASE-16657 Expose per-region last major compaction time in RegionServer UI

2016-10-07 Thread syuanjiang
HBASE-16657 Expose per-region last major compaction time in RegionServer UI

Signed-off-by: Gary Helmling 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/bc9a9724
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/bc9a9724
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/bc9a9724

Branch: refs/heads/hbase-12439
Commit: bc9a972451b2899dec1921bc34e86945df304547
Parents: 62bc090
Author: Dustin Pho 
Authored: Sat Sep 24 17:53:55 2016 -0700
Committer: Gary Helmling 
Committed: Fri Oct 7 09:49:24 2016 -0700

--
 .../hadoop/hbase/tmpl/regionserver/RegionListTmpl.jamon | 9 +
 1 file changed, 9 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/bc9a9724/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RegionListTmpl.jamon
--
diff --git 
a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RegionListTmpl.jamon
 
b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RegionListTmpl.jamon
index ab45799..b393137 100644
--- 
a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RegionListTmpl.jamon
+++ 
b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RegionListTmpl.jamon
@@ -22,6 +22,7 @@
 
 <%import>
 java.util.*;
+org.apache.commons.lang.time.FastDateFormat;
 org.apache.hadoop.hbase.regionserver.HRegionServer;
 org.apache.hadoop.hbase.util.Bytes;
 org.apache.hadoop.hbase.HRegionInfo;
@@ -197,6 +198,7 @@
 Num. Compacting KVs
 Num. Compacted KVs
 Compaction Progress
+Last Major Compaction
 
 
 <%for HRegionInfo r: onlineRegions %>
@@ -211,12 +213,19 @@
 }
 String displayName = HRegionInfo.getRegionNameAsStringForDisplay(r,
   regionServer.getConfiguration());
+long lastMajorCompactionTs = load.getLastMajorCompactionTs();
+String compactTime = "";
+if (lastMajorCompactionTs > 0) {
+  FastDateFormat fdf = FastDateFormat.getInstance("-MM-dd 
HH:mm ZZ");
+  compactTime = fdf.format(lastMajorCompactionTs);
+}
 
 <% 
displayName %>
 <%if load != null %>
 <% load.getTotalCompactingKVs() %>
 <% load.getCurrentCompactedKVs() %>
 <% percentDone %>
+<% compactTime %>
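
The Java behind the new column is small; a standalone sketch of the same logic (the date
pattern is an assumption, since the archived diff shows it with the year field dropped,
and load is the RegionLoad for the row being rendered):

  import org.apache.commons.lang.time.FastDateFormat;

  long lastMajorCompactionTs = load.getLastMajorCompactionTs();
  String compactTime = "";
  if (lastMajorCompactionTs > 0) {
    // Zero means the region has never been major compacted, so the cell stays empty.
    FastDateFormat fdf = FastDateFormat.getInstance("yyyy-MM-dd HH:mm ZZ");
    compactTime = fdf.format(lastMajorCompactionTs);
  }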
 
 
 



[41/77] [abbrv] [partial] hbase git commit: HBASE-15638 Shade protobuf Which includes

2016-10-07 Thread syuanjiang
http://git-wip-us.apache.org/repos/asf/hbase/blob/95c1dc93/hbase-client/src/test/java/org/apache/hadoop/hbase/TestInterfaceAudienceAnnotations.java
--
diff --git 
a/hbase-client/src/test/java/org/apache/hadoop/hbase/TestInterfaceAudienceAnnotations.java
 
b/hbase-client/src/test/java/org/apache/hadoop/hbase/TestInterfaceAudienceAnnotations.java
index 426b6a7..d0b6317 100644
--- 
a/hbase-client/src/test/java/org/apache/hadoop/hbase/TestInterfaceAudienceAnnotations.java
+++ 
b/hbase-client/src/test/java/org/apache/hadoop/hbase/TestInterfaceAudienceAnnotations.java
@@ -76,6 +76,14 @@ public class TestInterfaceAudienceAnnotations {
 }
   }
 
+  class ShadedProtobufClassFilter implements ClassFinder.ClassFilter {
+@Override
+public boolean isCandidateClass(Class<?> c) {
+  return c.getPackage().getName().
+  contains("org.apache.hadoop.hbase.shaded.com.google.protobuf");
+}
+  }
+
   /** Selects classes with one of the {@link InterfaceAudience} annotation in 
their class
* declaration.
*/
@@ -273,6 +281,7 @@ public class TestInterfaceAudienceAnnotations {
   new And(new PublicClassFilter(),
   new Not(new TestClassFilter()),
   new Not(new GeneratedClassFilter()),
+  new Not(new ShadedProtobufClassFilter()),
   new Not(new IsInterfaceStabilityClassFilter()),
   new Not(new InterfaceAudienceAnnotatedClassFilter()),
   new Not(new CloverInstrumentationFilter()))
@@ -312,6 +321,7 @@ public class TestInterfaceAudienceAnnotations {
   new And(new PublicClassFilter(),
   new Not(new TestClassFilter()),
   new Not(new GeneratedClassFilter()),
+  new Not(new ShadedProtobufClassFilter()),
   new InterfaceAudiencePublicAnnotatedClassFilter(),
   new Not(new IsInterfaceStabilityClassFilter()),
   new Not(new InterfaceStabilityAnnotatedClassFilter()))
@@ -355,6 +365,7 @@ public class TestInterfaceAudienceAnnotations {
 new Not((FileNameFilter) new TestFileNameFilter()),
 new And(new PublicClassFilter(), new Not(new TestClassFilter()),
 new Not(new GeneratedClassFilter()),
+new Not(new ShadedProtobufClassFilter()),
 new InterfaceAudiencePublicAnnotatedClassFilter()));
Set<Class<?>> classes = classFinder.findClasses(false);
 return classes;

http://git-wip-us.apache.org/repos/asf/hbase/blob/95c1dc93/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientExponentialBackoff.java
--
diff --git 
a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientExponentialBackoff.java
 
b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientExponentialBackoff.java
index 838c40e..d7aa2f0 100644
--- 
a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientExponentialBackoff.java
+++ 
b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientExponentialBackoff.java
@@ -21,8 +21,8 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.client.backoff.ExponentialClientBackoffPolicy;
 import org.apache.hadoop.hbase.client.backoff.ServerStatistics;
-import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
+import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
 import org.apache.hadoop.hbase.testclassification.ClientTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.apache.hadoop.hbase.util.Bytes;

http://git-wip-us.apache.org/repos/asf/hbase/blob/95c1dc93/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java
--
diff --git 
a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java
 
b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java
index 1ece448..41c9a56 100644
--- 
a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java
+++ 
b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java
@@ -49,35 +49,34 @@ import org.apache.hadoop.hbase.RegionLocations;
 import org.apache.hadoop.hbase.RegionTooBusyException;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.protobuf.generated.CellProtos;
-import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
-import 
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileRequest;
-import 
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileResponse;
-import 

[27/77] [abbrv] [partial] hbase git commit: HBASE-15638 Shade protobuf Which includes

2016-10-07 Thread syuanjiang
http://git-wip-us.apache.org/repos/asf/hbase/blob/95c1dc93/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/AbstractParser.java
--
diff --git 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/AbstractParser.java
 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/AbstractParser.java
new file mode 100644
index 000..42924cd
--- /dev/null
+++ 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/AbstractParser.java
@@ -0,0 +1,258 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package org.apache.hadoop.hbase.shaded.com.google.protobuf;
+
+import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractMessageLite.Builder.LimitedInputStream;
+
+import java.io.IOException;
+import java.io.InputStream;
+
+/**
+ * A partial implementation of the {@link Parser} interface which implements
+ * as many methods of that interface as possible in terms of other methods.
+ *
+ * Note: This class implements all the convenience methods in the
+ * {@link Parser} interface. See {@link Parser} for related javadocs.
+ * Subclasses need to implement
+ * {@link Parser#parsePartialFrom(CodedInputStream, ExtensionRegistryLite)}
+ *
+ * @author liuj...@google.com (Pherl Liu)
+ */
+public abstract class AbstractParser<MessageType extends MessageLite>
+    implements Parser<MessageType> {
+  /**
+   * Creates an UninitializedMessageException for MessageType.
+   */
+  private UninitializedMessageException
+  newUninitializedMessageException(MessageType message) {
+if (message instanceof AbstractMessageLite) {
+  return ((AbstractMessageLite) 
message).newUninitializedMessageException();
+}
+return new UninitializedMessageException(message);
+  }
+
+  /**
+   * Helper method to check if message is initialized.
+   *
+   * @throws InvalidProtocolBufferException if it is not initialized.
+   * @return The message to check.
+   */
+  private MessageType checkMessageInitialized(MessageType message)
+  throws InvalidProtocolBufferException {
+if (message != null && !message.isInitialized()) {
+  throw newUninitializedMessageException(message)
+  .asInvalidProtocolBufferException()
+  .setUnfinishedMessage(message);
+}
+return message;
+  }
+
+  private static final ExtensionRegistryLite EMPTY_REGISTRY
+  = ExtensionRegistryLite.getEmptyRegistry();
+
+  @Override
+  public MessageType parsePartialFrom(CodedInputStream input)
+  throws InvalidProtocolBufferException {
+return parsePartialFrom(input, EMPTY_REGISTRY);
+  }
+
+  @Override
+  public MessageType parseFrom(CodedInputStream input, ExtensionRegistryLite 
extensionRegistry)
+  throws InvalidProtocolBufferException {
+return checkMessageInitialized(
+parsePartialFrom(input, extensionRegistry));
+  }
+
+  @Override
+  public MessageType parseFrom(CodedInputStream input) throws 
InvalidProtocolBufferException {
+return parseFrom(input, EMPTY_REGISTRY);
+  }
+
+  @Override
+  public MessageType parsePartialFrom(ByteString data, ExtensionRegistryLite 
extensionRegistry)
+  throws InvalidProtocolBufferException {
+MessageType message;
+try {
+  CodedInputStream input = data.newCodedInput();
+
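
For orientation, a minimal sketch of the contract the class javadoc above describes: a concrete parser implements only parsePartialFrom(CodedInputStream, ExtensionRegistryLite) and inherits the rest of the parseFrom family from AbstractParser. FooMsg and its stream-reading constructor are hypothetical stand-ins for what the protobuf compiler generates.

import org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractParser;
import org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream;
import org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite;
import org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException;

// FooMsg is a hypothetical generated message type, used only for illustration.
final class FooMsgParser extends AbstractParser<FooMsg> {
  @Override
  public FooMsg parsePartialFrom(CodedInputStream input, ExtensionRegistryLite registry)
      throws InvalidProtocolBufferException {
    // Generated messages expose a stream-reading constructor; this call is a placeholder.
    // AbstractParser layers parseFrom(byte[]), parseFrom(ByteString), parseDelimitedFrom(...)
    // and the other convenience overloads on top of this single method.
    return new FooMsg(input, registry);
  }
}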

[59/77] [abbrv] hbase git commit: HBASE-16763 Remove unintentional dependency on net.sf.ehcache.search.Results

2016-10-07 Thread syuanjiang
HBASE-16763 Remove unintentional dependency on net.sf.ehcache.search.Results


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/0daeb635
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/0daeb635
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/0daeb635

Branch: refs/heads/hbase-12439
Commit: 0daeb635d0af787e433d08f18691c79b2e44b68c
Parents: 89eb71f
Author: Jonathan M Hsieh 
Authored: Tue Oct 4 10:54:24 2016 -0700
Committer: Jonathan M Hsieh 
Committed: Tue Oct 4 16:40:15 2016 -0700

--
 .../java/org/apache/hadoop/hbase/master/MockRegionServer.java| 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/0daeb635/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
index 2d5cd49..3e430b5 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
@@ -119,8 +119,6 @@ import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 import org.apache.zookeeper.KeeperException;
 
-import net.sf.ehcache.search.Results;
-
 /**
  * A mock RegionServer implementation.
  * Use this when you can't bend Mockito to your liking (e.g. return null result
@@ -137,7 +135,7 @@ ClientProtos.ClientService.BlockingInterface, 
RegionServerServices {
   private final Random random = new Random();
 
   /**
-   * Map of regions to map of rows and {@link Results}.  Used as data source 
when
+   * Map of regions to map of rows and {@link Result}.  Used as data source 
when
* {@link MockRegionServer#get(byte[], Get)} is called. Because we have a 
byte
* key, need to use TreeMap and provide a Comparator.  Use
* {@link #setGetResult(byte[], byte[], Result)} filling this map.



[76/77] [abbrv] hbase git commit: HBASE-16773 AccessController should access local region if possible

2016-10-07 Thread syuanjiang
HBASE-16773 AccessController should access local region if possible


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/62bc0901
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/62bc0901
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/62bc0901

Branch: refs/heads/hbase-12439
Commit: 62bc0901236267937b1fd7ebe4f1cab8a6033def
Parents: 96d34f2
Author: tedyu 
Authored: Fri Oct 7 06:22:32 2016 -0700
Committer: tedyu 
Committed: Fri Oct 7 06:22:32 2016 -0700

--
 .../security/access/AccessControlLists.java | 158 +--
 .../hbase/security/access/AccessController.java |  34 ++--
 .../security/access/TestTablePermissions.java   | 118 --
 3 files changed, 166 insertions(+), 144 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/62bc0901/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java
index b2a4736..1794464 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java
@@ -22,10 +22,8 @@ import java.io.ByteArrayInputStream;
 import java.io.DataInput;
 import java.io.DataInputStream;
 import java.io.IOException;
-import java.io.InputStream;
 import java.util.ArrayList;
 import java.util.Arrays;
-import java.util.Collection;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
@@ -41,6 +39,7 @@ import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.TableName;
@@ -64,8 +63,6 @@ import org.apache.hadoop.hbase.filter.RegexStringComparator;
 import org.apache.hadoop.hbase.master.MasterServices;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos;
-import 
org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.AccessControlService;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
 import org.apache.hadoop.hbase.regionserver.BloomType;
 import org.apache.hadoop.hbase.regionserver.InternalScanner;
 import org.apache.hadoop.hbase.regionserver.Region;
@@ -77,11 +74,6 @@ import org.apache.hadoop.io.Text;
 import com.google.common.collect.ArrayListMultimap;
 import com.google.common.collect.ListMultimap;
 import com.google.common.collect.Lists;
-import com.google.protobuf.ByteString;
-import com.google.protobuf.CodedInputStream;
-import com.google.protobuf.Message;
-import com.google.protobuf.RpcController;
-import com.google.protobuf.ServiceException;
 
 /**
  * Maintains lists of permission grants to users and groups to allow for
@@ -153,9 +145,10 @@ public class AccessControlLists {
* Stores a new user permission grant in the access control lists table.
* @param conf the configuration
* @param userPerm the details of the permission to be granted
+   * @param t acl table instance. It is closed upon method return.
* @throws IOException in the case of an error accessing the metadata table
*/
-  static void addUserPermission(Configuration conf, UserPermission userPerm)
+  static void addUserPermission(Configuration conf, UserPermission userPerm, 
Table t)
   throws IOException {
 Permission.Action[] actions = userPerm.getActions();
 byte[] rowKey = userPermissionRowKey(userPerm);
@@ -179,11 +172,10 @@ public class AccessControlLists {
   Bytes.toString(key)+": "+Bytes.toStringBinary(value)
   );
 }
-// TODO: Pass in a Connection rather than create one each time.
-try (Connection connection = ConnectionFactory.createConnection(conf)) {
-  try (Table table = connection.getTable(ACL_TABLE_NAME)) {
-table.put(p);
-  }
+try {
+  t.put(p);
+} finally {
+  t.close();
 }
   }
 
@@ -198,9 +190,10 @@ public class AccessControlLists {
*
* @param conf the configuration
* @param userPerm the details of the permission to be revoked
+   * @param t acl table
* @throws IOException if there is an error accessing the metadata table
*/
-  static void removeUserPermission(Configuration conf, UserPermission userPerm)
+  static void removeUserPermission(Configuration conf, 
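
A caller-side sketch of the new contract: the ACL table handle is passed in by the caller and, per the javadoc above, closed inside addUserPermission, so the helper no longer opens a Connection per call. The wrapper below is hypothetical and would have to live in the org.apache.hadoop.hbase.security.access package, since these helpers are package-private.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Table;

// Hypothetical caller, for illustration only.
final class AclGrantSketch {
  static void grant(Configuration conf, Connection connection, UserPermission userPerm)
      throws IOException {
    Table acl = connection.getTable(AccessControlLists.ACL_TABLE_NAME);
    // The table is closed when addUserPermission returns; do not reuse the handle afterwards.
    AccessControlLists.addUserPermission(conf, userPerm, acl);
  }
}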

[32/77] [abbrv] [partial] hbase git commit: HBASE-15638 Shade protobuf Which includes

2016-10-07 Thread syuanjiang
http://git-wip-us.apache.org/repos/asf/hbase/blob/95c1dc93/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/protobuf/generated/SecureBulkLoadProtos.java
--
diff --git 
a/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/protobuf/generated/SecureBulkLoadProtos.java
 
b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/protobuf/generated/SecureBulkLoadProtos.java
new file mode 100644
index 000..86c88c6
--- /dev/null
+++ 
b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/protobuf/generated/SecureBulkLoadProtos.java
@@ -0,0 +1,2088 @@
+// Generated by the protocol buffer compiler.  DO NOT EDIT!
+// source: SecureBulkLoad.proto
+
+package org.apache.hadoop.hbase.protobuf.generated;
+
+public final class SecureBulkLoadProtos {
+  private SecureBulkLoadProtos() {}
+  public static void registerAllExtensions(
+  com.google.protobuf.ExtensionRegistry registry) {
+  }
+  public interface SecureBulkLoadHFilesRequestOrBuilder
+  extends com.google.protobuf.MessageOrBuilder {
+
+// repeated .hbase.pb.BulkLoadHFileRequest.FamilyPath family_path = 1;
+/**
+ * repeated .hbase.pb.BulkLoadHFileRequest.FamilyPath family_path = 
1;
+ */
+    java.util.List<org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileRequest.FamilyPath>
+        getFamilyPathList();
+/**
+ * repeated .hbase.pb.BulkLoadHFileRequest.FamilyPath family_path = 
1;
+ */
+
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileRequest.FamilyPath
 getFamilyPath(int index);
+/**
+ * repeated .hbase.pb.BulkLoadHFileRequest.FamilyPath family_path = 
1;
+ */
+int getFamilyPathCount();
+/**
+ * repeated .hbase.pb.BulkLoadHFileRequest.FamilyPath family_path = 
1;
+ */
+    java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileRequest.FamilyPathOrBuilder>
+        getFamilyPathOrBuilderList();
+/**
+ * repeated .hbase.pb.BulkLoadHFileRequest.FamilyPath family_path = 
1;
+ */
+
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileRequest.FamilyPathOrBuilder
 getFamilyPathOrBuilder(
+int index);
+
+// optional bool assign_seq_num = 2;
+/**
+ * optional bool assign_seq_num = 2;
+ */
+boolean hasAssignSeqNum();
+/**
+ * optional bool assign_seq_num = 2;
+ */
+boolean getAssignSeqNum();
+
+// required .hbase.pb.DelegationToken fs_token = 3;
+/**
+ * required .hbase.pb.DelegationToken fs_token = 3;
+ */
+boolean hasFsToken();
+/**
+ * required .hbase.pb.DelegationToken fs_token = 3;
+ */
+org.apache.hadoop.hbase.protobuf.generated.ClientProtos.DelegationToken 
getFsToken();
+/**
+ * required .hbase.pb.DelegationToken fs_token = 3;
+ */
+
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.DelegationTokenOrBuilder
 getFsTokenOrBuilder();
+
+// required string bulk_token = 4;
+/**
+ * required string bulk_token = 4;
+ */
+boolean hasBulkToken();
+/**
+ * required string bulk_token = 4;
+ */
+java.lang.String getBulkToken();
+/**
+ * required string bulk_token = 4;
+ */
+com.google.protobuf.ByteString
+getBulkTokenBytes();
+  }
+  /**
+   * Protobuf type {@code hbase.pb.SecureBulkLoadHFilesRequest}
+   */
+  public static final class SecureBulkLoadHFilesRequest extends
+  com.google.protobuf.GeneratedMessage
+  implements SecureBulkLoadHFilesRequestOrBuilder {
+// Use SecureBulkLoadHFilesRequest.newBuilder() to construct.
+private SecureBulkLoadHFilesRequest(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+  super(builder);
+  this.unknownFields = builder.getUnknownFields();
+}
+private SecureBulkLoadHFilesRequest(boolean noInit) { this.unknownFields = 
com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+private static final SecureBulkLoadHFilesRequest defaultInstance;
+public static SecureBulkLoadHFilesRequest getDefaultInstance() {
+  return defaultInstance;
+}
+
+public SecureBulkLoadHFilesRequest getDefaultInstanceForType() {
+  return defaultInstance;
+}
+
+private final com.google.protobuf.UnknownFieldSet unknownFields;
+@java.lang.Override
+public final com.google.protobuf.UnknownFieldSet
+getUnknownFields() {
+  return this.unknownFields;
+}
+private SecureBulkLoadHFilesRequest(
+com.google.protobuf.CodedInputStream input,
+com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+throws com.google.protobuf.InvalidProtocolBufferException {
+  initFields();
+  int mutable_bitField0_ = 0;
+  com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+  com.google.protobuf.UnknownFieldSet.newBuilder();
+  try {
+boolean done = false;
+while (!done) {
+  int tag = input.readTag();
+  switch (tag) {
+case 0:
+  done = true;
+  break;
+default: {
+  if (!parseUnknownField(input, unknownFields,
+
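
To make the generated interface above concrete, a builder-side sketch with illustrative values; the builder method names follow standard protoc naming for the fields shown (family_path, assign_seq_num, fs_token, bulk_token) and are not themselves part of this excerpt.

import com.google.protobuf.ByteString;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
import org.apache.hadoop.hbase.protobuf.generated.SecureBulkLoadProtos;

final class SecureBulkLoadRequestSketch {
  static SecureBulkLoadProtos.SecureBulkLoadHFilesRequest build() {
    return SecureBulkLoadProtos.SecureBulkLoadHFilesRequest.newBuilder()
        .addFamilyPath(ClientProtos.BulkLoadHFileRequest.FamilyPath.newBuilder()
            .setFamily(ByteString.copyFromUtf8("cf"))       // column family
            .setPath("/staging/cf/hfile-0001")              // HFile staged for loading
            .build())
        .setAssignSeqNum(true)
        // Placeholder token; a real call would carry the filesystem delegation token.
        .setFsToken(ClientProtos.DelegationToken.getDefaultInstance())
        .setBulkToken("/hbase-staging/bulk-token")          // illustrative staging token
        .build();
  }
}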

[42/77] [abbrv] [partial] hbase git commit: HBASE-15638 Shade protobuf Which includes

2016-10-07 Thread syuanjiang
http://git-wip-us.apache.org/repos/asf/hbase/blob/95c1dc93/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
new file mode 100644
index 000..3f91ee0
--- /dev/null
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
@@ -0,0 +1,1498 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.shaded.protobuf;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.regex.Pattern;
+
+import org.apache.hadoop.hbase.CellScannable;
+import org.apache.hadoop.hbase.DoNotRetryIOException;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.Action;
+import org.apache.hadoop.hbase.client.Append;
+import org.apache.hadoop.hbase.client.Delete;
+import org.apache.hadoop.hbase.client.Durability;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.Increment;
+import org.apache.hadoop.hbase.client.MasterSwitchType;
+import org.apache.hadoop.hbase.client.Mutation;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.RegionCoprocessorServiceExec;
+import org.apache.hadoop.hbase.client.Row;
+import org.apache.hadoop.hbase.client.RowMutations;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.exceptions.DeserializationException;
+import org.apache.hadoop.hbase.filter.ByteArrayComparable;
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerInfoRequest;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionRequest;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterRequest;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerRequest;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.RegionUpdateInfo;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.WarmupRegionRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.BulkLoadHFileRequest;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Condition;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.GetRequest;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutateRequest;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.ColumnValue;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.ColumnValue.QualifierValue;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.MutationType;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.RegionAction;
+import 

[10/77] [abbrv] [partial] hbase git commit: HBASE-15638 Shade protobuf Which includes

2016-10-07 Thread syuanjiang
http://git-wip-us.apache.org/repos/asf/hbase/blob/95c1dc93/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/Int32Value.java
--
diff --git 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/Int32Value.java
 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/Int32Value.java
new file mode 100644
index 000..89db936
--- /dev/null
+++ 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/Int32Value.java
@@ -0,0 +1,451 @@
+// Generated by the protocol buffer compiler.  DO NOT EDIT!
+// source: google/protobuf/wrappers.proto
+
+package org.apache.hadoop.hbase.shaded.com.google.protobuf;
+
+/**
+ * 
+ * Wrapper message for `int32`.
+ * The JSON representation for `Int32Value` is JSON number.
+ * 
+ *
+ * Protobuf type {@code google.protobuf.Int32Value}
+ */
+public  final class Int32Value extends
+org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 
implements
+// @@protoc_insertion_point(message_implements:google.protobuf.Int32Value)
+Int32ValueOrBuilder {
+  // Use Int32Value.newBuilder() to construct.
+  private Int32Value(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
+super(builder);
+  }
+  private Int32Value() {
+value_ = 0;
+  }
+
+  @java.lang.Override
+  public final 
org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet
+  getUnknownFields() {
+return 
org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.getDefaultInstance();
+  }
+  private Int32Value(
+  org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream 
input,
+  org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite 
extensionRegistry)
+  throws 
org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException
 {
+this();
+int mutable_bitField0_ = 0;
+try {
+  boolean done = false;
+  while (!done) {
+int tag = input.readTag();
+switch (tag) {
+  case 0:
+done = true;
+break;
+  default: {
+if (!input.skipField(tag)) {
+  done = true;
+}
+break;
+  }
+  case 8: {
+
+value_ = input.readInt32();
+break;
+  }
+}
+  }
+} catch 
(org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException
 e) {
+  throw e.setUnfinishedMessage(this);
+} catch (java.io.IOException e) {
+  throw new 
org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException(
+  e).setUnfinishedMessage(this);
+} finally {
+  makeExtensionsImmutable();
+}
+  }
+  public static final 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+  getDescriptor() {
+return 
org.apache.hadoop.hbase.shaded.com.google.protobuf.WrappersProto.internal_static_google_protobuf_Int32Value_descriptor;
+  }
+
+  protected 
org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+  internalGetFieldAccessorTable() {
+return 
org.apache.hadoop.hbase.shaded.com.google.protobuf.WrappersProto.internal_static_google_protobuf_Int32Value_fieldAccessorTable
+.ensureFieldAccessorsInitialized(
+
org.apache.hadoop.hbase.shaded.com.google.protobuf.Int32Value.class, 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Int32Value.Builder.class);
+  }
+
+  public static final int VALUE_FIELD_NUMBER = 1;
+  private int value_;
+  /**
+   * 
+   * The int32 value.
+   * 
+   *
+   * optional int32 value = 1;
+   */
+  public int getValue() {
+return value_;
+  }
+
+  private byte memoizedIsInitialized = -1;
+  public final boolean isInitialized() {
+byte isInitialized = memoizedIsInitialized;
+if (isInitialized == 1) return true;
+if (isInitialized == 0) return false;
+
+memoizedIsInitialized = 1;
+return true;
+  }
+
+  public void 
writeTo(org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream 
output)
+  throws java.io.IOException {
+if (value_ != 0) {
+  output.writeInt32(1, value_);
+}
+  }
+
+  public int getSerializedSize() {
+int size = memoizedSize;
+if (size != -1) return size;
+
+size = 0;
+if (value_ != 0) {
+  size += 
org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream
+.computeInt32Size(1, value_);
+}
+memoizedSize = size;
+return size;
+  }
+
+  private static final long serialVersionUID = 0L;
+  @java.lang.Override
+  public boolean equals(final java.lang.Object obj) {
+if (obj == this) {
+ return true;
+}
+if (!(obj instanceof 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Int32Value)) {
+  return super.equals(obj);
+
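
A short round trip tying the generated members above together; newBuilder, toByteArray and parseFrom are the standard generated or inherited methods and are assumed here since the excerpt does not reach them.

import org.apache.hadoop.hbase.shaded.com.google.protobuf.Int32Value;
import org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException;

public class Int32ValueRoundTrip {
  public static void main(String[] args) throws InvalidProtocolBufferException {
    // Build the wrapper message; writeTo/getSerializedSize above are what back toByteArray().
    Int32Value original = Int32Value.newBuilder().setValue(42).build();
    byte[] wire = original.toByteArray();
    // parseFrom comes via the generated parser, an AbstractParser subclass.
    Int32Value parsed = Int32Value.parseFrom(wire);
    System.out.println(parsed.getValue()); // prints 42
  }
}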

[09/77] [abbrv] [partial] hbase git commit: HBASE-15638 Shade protobuf Which includes

2016-10-07 Thread syuanjiang
http://git-wip-us.apache.org/repos/asf/hbase/blob/95c1dc93/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/LazyFieldLite.java
--
diff --git 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/LazyFieldLite.java
 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/LazyFieldLite.java
new file mode 100644
index 000..4d4ac16
--- /dev/null
+++ 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/LazyFieldLite.java
@@ -0,0 +1,458 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package org.apache.hadoop.hbase.shaded.com.google.protobuf;
+
+import java.io.IOException;
+
+/**
+ * LazyFieldLite encapsulates the logic of lazily parsing message fields. It 
stores
+ * the message in a ByteString initially and then parses it on-demand.
+ *
+ * LazyFieldLite is thread-compatible: concurrent reads are safe once the 
proto that this
+ * LazyFieldLite is a part of is no longer being mutated by its Builder. 
However, explicit
+ * synchronization is needed under read/write situations.
+ *
+ * When a LazyFieldLite is used in the context of a MessageLite object, its 
behavior is considered
+ * to be immutable and none of the setter methods in its API are expected to 
be invoked. All of the
+ * getters are expected to be thread-safe. When used in the context of a 
MessageLite.Builder,
+ * setters can be invoked, but there is no guarantee of thread safety.
+ * 
+ * TODO(yatin,dweis): Consider splitting this class's functionality and put 
the mutable methods
+ * into a separate builder class to allow us to give stronger compile-time 
guarantees.
+ *
+ * This class is internal implementation detail of the protobuf library, so 
you don't need to use it
+ * directly.
+ *
+ * @author xia...@google.com (Xiang Li)
+ */
+public class LazyFieldLite {
+  private static final ExtensionRegistryLite EMPTY_REGISTRY =
+  ExtensionRegistryLite.getEmptyRegistry();
+
+  /**
+   * The value associated with the LazyFieldLite object is stored in one or 
more of the following
+   * three fields (delayedBytes, value, memoizedBytes). They should together 
be interpreted as
+   * follows.
+   * 1) delayedBytes can be non-null, while value and memoizedBytes is null. 
The object will be in
+   *this state while the value for the object has not yet been parsed.
+   * 2) Both delayedBytes and value are non-null. The object transitions to 
this state as soon as
+   *some caller needs to access the value (by invoking getValue()).
+   * 3) memoizedBytes is merely an optimization for calls to 
LazyFieldLite.toByteString() to avoid
+   *recomputing the ByteString representation on each call. Instead, when 
the value is parsed
+   *from delayedBytes, we will also assign the contents of delayedBytes to 
memoizedBytes (since
+   *that is the ByteString representation of value).
+   * 4) Finally, if the LazyFieldLite was created directly with a parsed 
MessageLite value, then
+   *delayedBytes will be null, and memoizedBytes will be initialized only 
upon the first call to
+   *LazyFieldLite.toByteString().
+   *
+   * Given the above conditions, any caller that needs a serialized 
representation of 
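
A small sketch of the states walked through in the comment above. The setByteString and getValue methods are assumed from the upstream LazyFieldLite API (they sit outside the shown excerpt), and the shaded Int32Value wrapper is borrowed from elsewhere in this patch purely as a convenient message type.

import org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString;
import org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite;
import org.apache.hadoop.hbase.shaded.com.google.protobuf.Int32Value;
import org.apache.hadoop.hbase.shaded.com.google.protobuf.LazyFieldLite;

public class LazyFieldLiteSketch {
  public static void main(String[] args) {
    ByteString wire = Int32Value.newBuilder().setValue(7).build().toByteString();
    LazyFieldLite lazy = new LazyFieldLite();
    // State 1: only delayedBytes is populated; nothing has been parsed yet.
    lazy.setByteString(wire, ExtensionRegistryLite.getEmptyRegistry());
    // State 2: the first getValue(...) call parses on demand and caches the message.
    Int32Value value = (Int32Value) lazy.getValue(Int32Value.getDefaultInstance());
    // State 3: toByteString() can hand back the delayed bytes without re-serializing.
    System.out.println(value.getValue() + " / " + lazy.toByteString().size());
  }
}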

[52/77] [abbrv] hbase git commit: HBASE-16742 Add chapter for devs on how we do protobufs going forward Fix misspelling noticed by Anoop Sam John.

2016-10-07 Thread syuanjiang
HBASE-16742 Add chapter for devs on how we do protobufs going forward
Fix misspelling noticed by Anoop Sam John.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/2508edcd
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/2508edcd
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/2508edcd

Branch: refs/heads/hbase-12439
Commit: 2508edcd4e936029182f3386f72e8f2080d43a34
Parents: 95c1dc9
Author: stack 
Authored: Mon Oct 3 22:42:46 2016 -0700
Committer: stack 
Committed: Mon Oct 3 22:42:46 2016 -0700

--
 src/main/asciidoc/_chapters/protobuf.adoc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/2508edcd/src/main/asciidoc/_chapters/protobuf.adoc
--
diff --git a/src/main/asciidoc/_chapters/protobuf.adoc 
b/src/main/asciidoc/_chapters/protobuf.adoc
index 39c3200..3cbc504 100644
--- a/src/main/asciidoc/_chapters/protobuf.adoc
+++ b/src/main/asciidoc/_chapters/protobuf.adoc
@@ -158,7 +158,7 @@ of the generated protobuf class
 in hbase-protocol and another generated instance that is the same in all
 regards except its protobuf references are to the internal shaded
 version at `org.apache.hadoop.hbase.shaded.protobuf.generated.ServerName`
-(not the 'shaded' addition in the middle of the package name).
+(note the 'shaded' addition in the middle of the package name).
 
 If you extend a proto in `hbase-protocol-shaded` for  internal use,
 consider extending it also in



[51/77] [abbrv] [partial] hbase git commit: HBASE-15638 Shade protobuf Which includes

2016-10-07 Thread syuanjiang
HBASE-15638 Shade protobuf
Which includes

HBASE-16742 Add chapter for devs on how we do protobufs going forward

HBASE-16741 Amend the generate protobufs out-of-band build step
to include shade, pulling in protobuf source and a hook for patching 
protobuf

Removed ByteStringer from hbase-protocol-shaded. Use the protobuf-3.1.0
trick directly instead. Makes stuff cleaner. All under 'shaded' dir is
now generated.

HBASE-16567 Upgrade to protobuf-3.1.x
Regenerate all protos in this module with protoc3.
Redo ByteStringer to use new pb3.1.0 unsafebytesutil
instead of HBaseZeroCopyByteString

HBASE-16264 Figure how to deal with endpoints and shaded pb Shade our 
protobufs.
Do it in a manner that makes it so we can still have in our API references 
to
com.google.protobuf (and in REST). The c.g.p in API is for Coprocessor 
Endpoints (CPEP)

This patch is Tactic #4 from Shading Doc attached to the referenced 
issue.
Figuring an appoach took a while because we have Coprocessor 
Endpoints
mixed in with the core of HBase that are tough to untangle (FIX).

Tactic #4 (the fourth attempt at addressing this issue) is COPY all 
but
the CPEP .proto files currently in hbase-protocol to a new module 
named
hbase-protocol-shaded. Generate .protos again in the new location 
and
then relocate/shade the generated files. Let CPEPs keep on with the
old references at com.google.protobuf.* and
org.apache.hadoop.hbase.protobuf.* but change the hbase core so all
instead refer to the relocated files in their new location at
org.apache.hadoop.hbase.shaded.com.google.protobuf.*.

Let the new module also shade protobufs themselves and change hbase
core to pick up this shaded protobuf rather than directly reference
com.google.protobuf.

This approach allows us to explicitly refer to either the shaded or
non-shaded version of a protobuf class in any particular context 
(though
usually context dictates one or the other). Core runs on shaded 
protobuf.
CPEPs continue to use whatever is on the classpath with
com.google.protobuf.* which is pb2.5.0 for the near future at least.

See above cited doc for follow-ons and downsides. In short, IDEs 
will complain
about not being able to find the shaded protobufs since shading 
happens at package
time; will fix by checking in all generated classes and relocated 
protobuf in
a follow-on. Also, CPEPs currently suffer an extra-copy as 
marshalled from
non-shaded to shaded. To fix. Finally, our .protos are duplicated; 
once
shaded, and once not. Pain, but how else to reveal our protos to 
CPEPs or
C++ client that wants to talk with HBase AND shade protobuf.

Details:

Add a new hbase-protocol-shaded module. It is a copy of 
hbase-protocol
with all relocated offset from o.a.h.h. to o.a.h.h.shaded. The new 
module
also includes the relocated pb. It does not include CPEPs. They 
stay in
their old location.

Add another module hbase-endpoint which has in it all the endpoints
that ship as part of hbase -- at least the ones that are not
entangled with core such as AccessControl and Auth. Move all protos
for these CPEPs here as well as their unit tests (mostly moving a
bunch of stuff out of hbase-server module)

Much of the change looks like this:

 -import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 -import 
org.apache.hadoop.hbase.protobuf.generated.ClusterIdProtos;
 +import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 +import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterIdProtos;

In HTable and in HBaseAdmin, regularize the way Callables are used 
and also hide
protobuf usage as much as possible moving it up into Callable super 
classes or out
to utility classes. Still TODO is adding in of retries, etc., but 
can wait on
procedure which will redo all this.

Also in HTable and HBaseAdmin as well as in HRegionServer and 
Server, be explicit
when using non-shaded protobuf. Do the full-path so it is clear. 
This is around
endpoint coprocessors registration of services and execution of 
CPEP methods.

Shrunk ProtobufUtil by moving methods used by one CPEP only back to 
the CPEP either
into Client class or as new Util class; e.g. AccessControlUtil.

There are actually two versions of ProtobufUtil now; a shaded one 
and a subset
that is used by CPEPs doing non-shaded work.

Made it so hbase-common 
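
As an illustration of the point above about explicitly referring to either version of a protobuf class, a sketch using the same ClusterIdProtos example as the commit message; the ClusterId builder calls follow from its single cluster_id string field and are an assumption here.

public class ShadedVsUnshadedSketch {
  public static void main(String[] args) {
    // Core HBase code names the relocated, shaded generated class...
    org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterIdProtos.ClusterId shaded =
        org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterIdProtos.ClusterId.newBuilder()
            .setClusterId("sketch-cluster").build();
    // ...while a Coprocessor Endpoint keeps using the pb2.5.0 class from hbase-protocol.
    org.apache.hadoop.hbase.protobuf.generated.ClusterIdProtos.ClusterId unshaded =
        org.apache.hadoop.hbase.protobuf.generated.ClusterIdProtos.ClusterId.newBuilder()
            .setClusterId("sketch-cluster").build();
    System.out.println(shaded.getClusterId().equals(unshaded.getClusterId())); // true
  }
}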

[54/77] [abbrv] hbase git commit: HBASE-16742 Add chapter for devs on how we do protobufs going forward; ADDENDUM -- add in Duo Zhang remark

2016-10-07 Thread syuanjiang
HBASE-16742 Add chapter for devs on how we do protobufs going forward; ADDENDUM 
-- add in Duo Zhang remark


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/b5d34cf6
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/b5d34cf6
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/b5d34cf6

Branch: refs/heads/hbase-12439
Commit: b5d34cf6fc1daeb7e1140c34282885a13a1df473
Parents: 9e0c256
Author: stack 
Authored: Tue Oct 4 08:34:10 2016 -0700
Committer: stack 
Committed: Tue Oct 4 08:34:39 2016 -0700

--
 src/main/asciidoc/_chapters/protobuf.adoc | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/b5d34cf6/src/main/asciidoc/_chapters/protobuf.adoc
--
diff --git a/src/main/asciidoc/_chapters/protobuf.adoc 
b/src/main/asciidoc/_chapters/protobuf.adoc
index 3cbc504..4181878 100644
--- a/src/main/asciidoc/_chapters/protobuf.adoc
+++ b/src/main/asciidoc/_chapters/protobuf.adoc
@@ -74,12 +74,13 @@ practice needs to whither. We'll make plain why in the later
 xref:shaded.protobuf[hbase-2.0.0] section.
 
 [[cpeps]]
-=== Coprocessor Endpoints
+=== Coprocessor Endpoints (CPEPs)
 xref:cp:[Coprocessor Endpoints] are custom API a developer can
 add to HBase. Protobufs are used to describe the methods and arguments
 that comprise the new Service.
 Coprocessor Endpoints should make no use of HBase internals and
-only avail of public APIs. This is not always possible but beware
+only avail of public APIs; ideally a CPEP should depend on Interfaces
+and data structures only. This is not always possible but beware
 that doing so makes the Endpoint brittle, liable to breakage as HBase
 internals evolve. HBase internal APIs annotated as private or evolving
 do not have to respect semantic versioning rules or general java rules on



[73/77] [abbrv] hbase git commit: HBASE-16772 Add verbose option to VerifyReplication for logging good rows

2016-10-07 Thread syuanjiang
HBASE-16772 Add verbose option to VerifyReplication for logging good rows


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/97c13338
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/97c13338
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/97c13338

Branch: refs/heads/hbase-12439
Commit: 97c13338313b6ccf75f3b7988e97f0db5bf5e177
Parents: 7a38509
Author: tedyu 
Authored: Thu Oct 6 11:16:18 2016 -0700
Committer: tedyu 
Committed: Thu Oct 6 11:16:18 2016 -0700

--
 .../replication/VerifyReplication.java  | 24 
 1 file changed, 20 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/97c13338/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
index 04ae18f..0273b91 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
@@ -27,7 +27,6 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.hbase.Abortable;
 import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
@@ -47,8 +46,8 @@ import org.apache.hadoop.hbase.mapreduce.TableMapper;
 import org.apache.hadoop.hbase.mapreduce.TableSplit;
 import org.apache.hadoop.hbase.replication.ReplicationException;
 import org.apache.hadoop.hbase.replication.ReplicationFactory;
-import org.apache.hadoop.hbase.replication.ReplicationPeerZKImpl;
 import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
+import org.apache.hadoop.hbase.replication.ReplicationPeerZKImpl;
 import org.apache.hadoop.hbase.replication.ReplicationPeers;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Pair;
@@ -86,6 +85,7 @@ public class VerifyReplication extends Configured implements 
Tool {
   static String peerId = null;
   static String rowPrefixes = null;
   static int sleepMsBeforeReCompare = 0;
+  static boolean verbose = false;
 
   private final static String JOB_NAME_CONF_KEY = "mapreduce.job.name";
 
@@ -107,6 +107,7 @@ public class VerifyReplication extends Configured 
implements Tool {
 private ResultScanner replicatedScanner;
 private Result currentCompareRowInPeerTable;
 private int sleepMsBeforeReCompare;
+private boolean verbose = false;
 
 /**
  * Map method that compares every scanned row with the equivalent from
@@ -123,6 +124,7 @@ public class VerifyReplication extends Configured 
implements Tool {
   if (replicatedScanner == null) {
 Configuration conf = context.getConfiguration();
 sleepMsBeforeReCompare = conf.getInt(NAME +".sleepMsBeforeReCompare", 
0);
+verbose = conf.getBoolean(NAME +".verbose", false);
 final Scan scan = new Scan();
 scan.setBatch(batch);
 scan.setCacheBlocks(false);
@@ -173,6 +175,9 @@ public class VerifyReplication extends Configured 
implements Tool {
   try {
 Result.compareResults(value, currentCompareRowInPeerTable);
 context.getCounter(Counters.GOODROWS).increment(1);
+if (verbose) {
+  LOG.info("Good row key: " + delimiter + 
Bytes.toString(value.getRow()) + delimiter);
+}
   } catch (Exception e) {
 logFailRowAndIncreaseCounter(context, 
Counters.CONTENT_DIFFERENT_ROWS, value);
 LOG.error("Exception while comparing row : " + e);
@@ -199,6 +204,10 @@ public class VerifyReplication extends Configured 
implements Tool {
   Result sourceResult = sourceTable.get(new Get(row.getRow()));
   Result replicatedResult = replicatedTable.get(new Get(row.getRow()));
   Result.compareResults(sourceResult, replicatedResult);
+  context.getCounter(Counters.GOODROWS).increment(1);
+  if (verbose) {
+LOG.info("Good row key: " + delimiter + 
Bytes.toString(row.getRow()) + delimiter);
+  }
   return;
 } catch (Exception e) {
   LOG.error("recompare fail after sleep, rowkey=" + delimiter +
@@ -311,6 +320,7 @@ public class VerifyReplication extends Configured 
implements Tool {
 conf.setLong(NAME+".startTime", startTime);
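
A caller-side sketch of enabling the new flag before submitting the job; the "verifyrep" key prefix is an assumption standing in for the NAME constant, while the patch itself only shows the map task reading conf.getBoolean(NAME + ".verbose", false).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class VerboseVerifyReplicationSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Log every good (matching) row key at INFO level, in addition to the GOODROWS counter.
    conf.setBoolean("verifyrep.verbose", true);
    // Optional: pause before re-comparing rows that differed on the first pass.
    conf.setInt("verifyrep.sleepMsBeforeReCompare", 100);
  }
}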
 

[06/77] [abbrv] [partial] hbase git commit: HBASE-15638 Shade protobuf Which includes

2016-10-07 Thread syuanjiang
http://git-wip-us.apache.org/repos/asf/hbase/blob/95c1dc93/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/Method.java
--
diff --git 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/Method.java
 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/Method.java
new file mode 100644
index 000..5af69eb
--- /dev/null
+++ 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/Method.java
@@ -0,0 +1,1541 @@
+// Generated by the protocol buffer compiler.  DO NOT EDIT!
+// source: google/protobuf/api.proto
+
+package org.apache.hadoop.hbase.shaded.com.google.protobuf;
+
+/**
+ * 
+ * Method represents a method of an api.
+ * 
+ *
+ * Protobuf type {@code google.protobuf.Method}
+ */
+public  final class Method extends
+org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 
implements
+// @@protoc_insertion_point(message_implements:google.protobuf.Method)
+MethodOrBuilder {
+  // Use Method.newBuilder() to construct.
+  private Method(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
+super(builder);
+  }
+  private Method() {
+name_ = "";
+requestTypeUrl_ = "";
+requestStreaming_ = false;
+responseTypeUrl_ = "";
+responseStreaming_ = false;
+options_ = java.util.Collections.emptyList();
+syntax_ = 0;
+  }
+
+  @java.lang.Override
+  public final 
org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet
+  getUnknownFields() {
+return 
org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.getDefaultInstance();
+  }
+  private Method(
+  org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream 
input,
+  org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite 
extensionRegistry)
+  throws 
org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException
 {
+this();
+int mutable_bitField0_ = 0;
+try {
+  boolean done = false;
+  while (!done) {
+int tag = input.readTag();
+switch (tag) {
+  case 0:
+done = true;
+break;
+  default: {
+if (!input.skipField(tag)) {
+  done = true;
+}
+break;
+  }
+  case 10: {
+java.lang.String s = input.readStringRequireUtf8();
+
+name_ = s;
+break;
+  }
+  case 18: {
+java.lang.String s = input.readStringRequireUtf8();
+
+requestTypeUrl_ = s;
+break;
+  }
+  case 24: {
+
+requestStreaming_ = input.readBool();
+break;
+  }
+  case 34: {
+java.lang.String s = input.readStringRequireUtf8();
+
+responseTypeUrl_ = s;
+break;
+  }
+  case 40: {
+
+responseStreaming_ = input.readBool();
+break;
+  }
+  case 50: {
+if (!((mutable_bitField0_ & 0x0020) == 0x0020)) {
+  options_ = new java.util.ArrayList<org.apache.hadoop.hbase.shaded.com.google.protobuf.Option>();
+  mutable_bitField0_ |= 0x0020;
+}
+options_.add(
+
input.readMessage(org.apache.hadoop.hbase.shaded.com.google.protobuf.Option.parser(),
 extensionRegistry));
+break;
+  }
+  case 56: {
+int rawValue = input.readEnum();
+
+syntax_ = rawValue;
+break;
+  }
+}
+  }
+} catch 
(org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException
 e) {
+  throw e.setUnfinishedMessage(this);
+} catch (java.io.IOException e) {
+  throw new 
org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException(
+  e).setUnfinishedMessage(this);
+} finally {
+  if (((mutable_bitField0_ & 0x0020) == 0x0020)) {
+options_ = java.util.Collections.unmodifiableList(options_);
+  }
+  makeExtensionsImmutable();
+}
+  }
+  public static final 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+  getDescriptor() {
+return 
org.apache.hadoop.hbase.shaded.com.google.protobuf.ApiProto.internal_static_google_protobuf_Method_descriptor;
+  }
+
+  protected 
org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+  internalGetFieldAccessorTable() {
+return 
org.apache.hadoop.hbase.shaded.com.google.protobuf.ApiProto.internal_static_google_protobuf_Method_fieldAccessorTable
+.ensureFieldAccessorsInitialized(
+org.apache.hadoop.hbase.shaded.com.google.protobuf.Method.class, 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Method.Builder.class);
+  }
+
+  private int bitField0_;
+  public static 

[26/77] [abbrv] [partial] hbase git commit: HBASE-15638 Shade protobuf Which includes

2016-10-07 Thread syuanjiang
http://git-wip-us.apache.org/repos/asf/hbase/blob/95c1dc93/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/Api.java
--
diff --git 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/Api.java
 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/Api.java
new file mode 100644
index 000..ea15b40
--- /dev/null
+++ 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/Api.java
@@ -0,0 +1,2473 @@
+// Generated by the protocol buffer compiler.  DO NOT EDIT!
+// source: google/protobuf/api.proto
+
+package org.apache.hadoop.hbase.shaded.com.google.protobuf;
+
+/**
+ * 
+ * Api is a light-weight descriptor for a protocol buffer service.
+ * 
+ *
+ * Protobuf type {@code google.protobuf.Api}
+ */
+public  final class Api extends
+org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 
implements
+// @@protoc_insertion_point(message_implements:google.protobuf.Api)
+ApiOrBuilder {
+  // Use Api.newBuilder() to construct.
+  private Api(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
+super(builder);
+  }
+  private Api() {
+name_ = "";
+methods_ = java.util.Collections.emptyList();
+options_ = java.util.Collections.emptyList();
+version_ = "";
+mixins_ = java.util.Collections.emptyList();
+syntax_ = 0;
+  }
+
+  @java.lang.Override
+  public final 
org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet
+  getUnknownFields() {
+return 
org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.getDefaultInstance();
+  }
+  private Api(
+  org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream 
input,
+  org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite 
extensionRegistry)
+  throws 
org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException
 {
+this();
+int mutable_bitField0_ = 0;
+try {
+  boolean done = false;
+  while (!done) {
+int tag = input.readTag();
+switch (tag) {
+  case 0:
+done = true;
+break;
+  default: {
+if (!input.skipField(tag)) {
+  done = true;
+}
+break;
+  }
+  case 10: {
+java.lang.String s = input.readStringRequireUtf8();
+
+name_ = s;
+break;
+  }
+  case 18: {
+if (!((mutable_bitField0_ & 0x0002) == 0x0002)) {
+  methods_ = new java.util.ArrayList<org.apache.hadoop.hbase.shaded.com.google.protobuf.Method>();
+  mutable_bitField0_ |= 0x0002;
+}
+methods_.add(
+
input.readMessage(org.apache.hadoop.hbase.shaded.com.google.protobuf.Method.parser(),
 extensionRegistry));
+break;
+  }
+  case 26: {
+if (!((mutable_bitField0_ & 0x0004) == 0x0004)) {
+  options_ = new java.util.ArrayList<org.apache.hadoop.hbase.shaded.com.google.protobuf.Option>();
+  mutable_bitField0_ |= 0x0004;
+}
+options_.add(
+
input.readMessage(org.apache.hadoop.hbase.shaded.com.google.protobuf.Option.parser(),
 extensionRegistry));
+break;
+  }
+  case 34: {
+java.lang.String s = input.readStringRequireUtf8();
+
+version_ = s;
+break;
+  }
+  case 42: {
+
org.apache.hadoop.hbase.shaded.com.google.protobuf.SourceContext.Builder 
subBuilder = null;
+if (sourceContext_ != null) {
+  subBuilder = sourceContext_.toBuilder();
+}
+sourceContext_ = 
input.readMessage(org.apache.hadoop.hbase.shaded.com.google.protobuf.SourceContext.parser(),
 extensionRegistry);
+if (subBuilder != null) {
+  subBuilder.mergeFrom(sourceContext_);
+  sourceContext_ = subBuilder.buildPartial();
+}
+
+break;
+  }
+  case 50: {
+if (!((mutable_bitField0_ & 0x0020) == 0x0020)) {
+  mixins_ = new java.util.ArrayList<org.apache.hadoop.hbase.shaded.com.google.protobuf.Mixin>();
+  mutable_bitField0_ |= 0x0020;
+}
+mixins_.add(
+
input.readMessage(org.apache.hadoop.hbase.shaded.com.google.protobuf.Mixin.parser(),
 extensionRegistry));
+break;
+  }
+  case 56: {
+int rawValue = input.readEnum();
+
+syntax_ = rawValue;
+break;
+  }
+}
+  }
+} catch 
(org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException
 e) {
+  throw e.setUnfinishedMessage(this);
+} catch (java.io.IOException e) {
+  throw new 
org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException(
+  

[23/77] [abbrv] [partial] hbase git commit: HBASE-15638 Shade protobuf Which includes

2016-10-07 Thread syuanjiang
http://git-wip-us.apache.org/repos/asf/hbase/blob/95c1dc93/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/CodedInputStream.java
--
diff --git 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/CodedInputStream.java
 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/CodedInputStream.java
new file mode 100644
index 000..12d70ce
--- /dev/null
+++ 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/CodedInputStream.java
@@ -0,0 +1,2895 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package org.apache.hadoop.hbase.shaded.com.google.protobuf;
+
+import static 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.EMPTY_BYTE_ARRAY;
+import static 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.EMPTY_BYTE_BUFFER;
+import static 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.UTF_8;
+import static 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.checkNotNull;
+import static 
org.apache.hadoop.hbase.shaded.com.google.protobuf.WireFormat.FIXED_32_SIZE;
+import static 
org.apache.hadoop.hbase.shaded.com.google.protobuf.WireFormat.FIXED_64_SIZE;
+import static 
org.apache.hadoop.hbase.shaded.com.google.protobuf.WireFormat.MAX_VARINT_SIZE;
+
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+/**
+ * Reads and decodes protocol message fields.
+ *
+ * This class contains two kinds of methods: methods that read specific 
protocol message
+ * constructs and field types (e.g. {@link #readTag()} and {@link 
#readInt32()}) and methods that
+ * read low-level values (e.g. {@link #readRawVarint32()} and {@link 
#readRawBytes}). If you are
+ * reading encoded protocol messages, you should use the former methods, but 
if you are reading some
+ * other format of your own design, use the latter.
+ *
+ * @author ken...@google.com Kenton Varda
+ */
+public abstract class CodedInputStream {
+  private static final int DEFAULT_BUFFER_SIZE = 4096;
+  private static final int DEFAULT_RECURSION_LIMIT = 100;
+  private static final int DEFAULT_SIZE_LIMIT = 64 << 20; // 64MB
+
+  /** Visible for subclasses. See setRecursionLimit() */
+  int recursionDepth;
+
+  int recursionLimit = DEFAULT_RECURSION_LIMIT;
+
+  /** Visible for subclasses. See setSizeLimit() */
+  int sizeLimit = DEFAULT_SIZE_LIMIT;
+
+  /** Create a new CodedInputStream wrapping the given InputStream. */
+  public static CodedInputStream newInstance(final InputStream input) {
+return newInstance(input, DEFAULT_BUFFER_SIZE);
+  }
+
+  /** Create a new CodedInputStream wrapping the given InputStream. */
+  static CodedInputStream newInstance(final InputStream input, int bufferSize) 
{
+if (input == null) {
+  // TODO(nathanmittler): Ideally we should throw here. This is done for 
backward compatibility.
+  return newInstance(EMPTY_BYTE_ARRAY);
+}
+return new StreamDecoder(input, bufferSize);
+  }
+
+  /** Create a new CodedInputStream wrapping the given byte array. */
+  public static CodedInputStream 
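
A tiny write/read round trip showing the typed, message-oriented methods the javadoc above recommends; CodedOutputStream is the matching writer class from the same shaded package (it appears elsewhere in this patch), and the values are illustrative.

import java.io.IOException;
import org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream;
import org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream;

public class CodedStreamSketch {
  public static void main(String[] args) throws IOException {
    // Encode field number 1 as an int32, the way a generated message's writeTo would.
    byte[] buf = new byte[16];
    CodedOutputStream out = CodedOutputStream.newInstance(buf);
    out.writeInt32(1, 300);
    out.flush();

    // Decode with the readTag()/readInt32() pair rather than the raw low-level reads.
    CodedInputStream in = CodedInputStream.newInstance(buf);
    int tag = in.readTag();     // (field number << 3) | wire type
    int value = in.readInt32(); // 300
    System.out.println((tag >>> 3) + " = " + value);
  }
}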

[44/77] [abbrv] [partial] hbase git commit: HBASE-15638 Shade protobuf Which includes

2016-10-07 Thread syuanjiang
http://git-wip-us.apache.org/repos/asf/hbase/blob/95c1dc93/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ResponseConverter.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ResponseConverter.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ResponseConverter.java
deleted file mode 100644
index e5deabd..000
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ResponseConverter.java
+++ /dev/null
@@ -1,442 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.protobuf;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-import edu.umd.cs.findbugs.annotations.Nullable;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellScanner;
-import org.apache.hadoop.hbase.DoNotRetryIOException;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.SingleResponse;
-import org.apache.hadoop.hbase.ipc.ServerRpcController;
-import 
org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.GetUserPermissionsResponse;
-import 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionResponse;
-import 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetOnlineRegionResponse;
-import 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoResponse;
-import 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionResponse;
-import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ServerInfo;
-import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
-import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiRequest;
-import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiResponse;
-import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionAction;
-import 
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionActionResult;
-import 
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultOrException;
-import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanResponse;
-import 
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair;
-import org.apache.hadoop.hbase.protobuf.generated.MapReduceProtos.ScanMetrics;
-import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse;
-import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanResponse;
-import 
org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse;
-import org.apache.hadoop.hbase.regionserver.RegionOpeningState;
-import org.apache.hadoop.hbase.security.access.UserPermission;
-import org.apache.hadoop.util.StringUtils;
-
-import com.google.protobuf.ByteString;
-import com.google.protobuf.RpcController;
-
-/**
- * Helper utility to build protocol buffer responses,
- * or retrieve data from protocol buffer responses.
- */
-@InterfaceAudience.Private
-public final class ResponseConverter {
-  private static final Log LOG = LogFactory.getLog(ResponseConverter.class);
-
-  private ResponseConverter() {
-  }
-
-// Start utilities for Client
-
-  /**
-   * Get the results from a protocol buffer MultiResponse
-   *
-   * @param request the protocol buffer MultiResponse to convert
-   * @param cells Cells to go with the passed in proto. Can be null.
-   * @return the results that were in the MultiResponse (a Result or an Exception).
-   * @throws IOException
-   */
-  public static org.apache.hadoop.hbase.client.MultiResponse getResults(final 
MultiRequest request,
-  final MultiResponse response, final 

[37/77] [abbrv] [partial] hbase git commit: HBASE-15638 Shade protobuf Which includes

2016-10-07 Thread syuanjiang
http://git-wip-us.apache.org/repos/asf/hbase/blob/95c1dc93/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/coprocessor/protobuf/generated/ColumnAggregationWithErrorsProtos.java
--
diff --git 
a/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/coprocessor/protobuf/generated/ColumnAggregationWithErrorsProtos.java
 
b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/coprocessor/protobuf/generated/ColumnAggregationWithErrorsProtos.java
new file mode 100644
index 000..64cf82e
--- /dev/null
+++ 
b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/coprocessor/protobuf/generated/ColumnAggregationWithErrorsProtos.java
@@ -0,0 +1,1290 @@
+// Generated by the protocol buffer compiler.  DO NOT EDIT!
+// source: ColumnAggregationWithErrorsProtocol.proto
+
+package org.apache.hadoop.hbase.coprocessor.protobuf.generated;
+
+public final class ColumnAggregationWithErrorsProtos {
+  private ColumnAggregationWithErrorsProtos() {}
+  public static void registerAllExtensions(
+  com.google.protobuf.ExtensionRegistry registry) {
+  }
+  public interface ColumnAggregationWithErrorsSumRequestOrBuilder
+  extends com.google.protobuf.MessageOrBuilder {
+
+// required bytes family = 1;
+/**
+ * required bytes family = 1;
+ */
+boolean hasFamily();
+/**
+ * required bytes family = 1;
+ */
+com.google.protobuf.ByteString getFamily();
+
+// optional bytes qualifier = 2;
+/**
+ * optional bytes qualifier = 2;
+ */
+boolean hasQualifier();
+/**
+ * optional bytes qualifier = 2;
+ */
+com.google.protobuf.ByteString getQualifier();
+  }
+  /**
+   * Protobuf type {@code ColumnAggregationWithErrorsSumRequest}
+   *
+   * 
+   * use unique names for messages in ColumnAggregationXXX.protos due to a bug in
+   * protoc or hadoop's protoc compiler.
+   * 
+   */
+  public static final class ColumnAggregationWithErrorsSumRequest extends
+  com.google.protobuf.GeneratedMessage
+  implements ColumnAggregationWithErrorsSumRequestOrBuilder {
+// Use ColumnAggregationWithErrorsSumRequest.newBuilder() to construct.
+private ColumnAggregationWithErrorsSumRequest(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+  super(builder);
+  this.unknownFields = builder.getUnknownFields();
+}
+private ColumnAggregationWithErrorsSumRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+private static final ColumnAggregationWithErrorsSumRequest defaultInstance;
+public static ColumnAggregationWithErrorsSumRequest getDefaultInstance() {
+  return defaultInstance;
+}
+
+public ColumnAggregationWithErrorsSumRequest getDefaultInstanceForType() {
+  return defaultInstance;
+}
+
+private final com.google.protobuf.UnknownFieldSet unknownFields;
+@java.lang.Override
+public final com.google.protobuf.UnknownFieldSet
+getUnknownFields() {
+  return this.unknownFields;
+}
+private ColumnAggregationWithErrorsSumRequest(
+com.google.protobuf.CodedInputStream input,
+com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+throws com.google.protobuf.InvalidProtocolBufferException {
+  initFields();
+  int mutable_bitField0_ = 0;
+  com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+  com.google.protobuf.UnknownFieldSet.newBuilder();
+  try {
+boolean done = false;
+while (!done) {
+  int tag = input.readTag();
+  switch (tag) {
+case 0:
+  done = true;
+  break;
+default: {
+  if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+done = true;
+  }
+  break;
+}
+case 10: {
+  bitField0_ |= 0x0001;
+  family_ = input.readBytes();
+  break;
+}
+case 18: {
+  bitField0_ |= 0x0002;
+  qualifier_ = input.readBytes();
+  break;
+}
+  }
+}
+  } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+throw e.setUnfinishedMessage(this);
+  } catch (java.io.IOException e) {
+throw new com.google.protobuf.InvalidProtocolBufferException(
+e.getMessage()).setUnfinishedMessage(this);
+  } finally {
+this.unknownFields = unknownFields.build();
+makeExtensionsImmutable();
+  }
+}
+public static final com.google.protobuf.Descriptors.Descriptor
+getDescriptor() {
+  return 
org.apache.hadoop.hbase.coprocessor.protobuf.generated.ColumnAggregationWithErrorsProtos.internal_static_ColumnAggregationWithErrorsSumRequest_descriptor;
+}
+
+protected 

[45/77] [abbrv] [partial] hbase git commit: HBASE-15638 Shade protobuf Which includes

2016-10-07 Thread syuanjiang
http://git-wip-us.apache.org/repos/asf/hbase/blob/95c1dc93/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java
deleted file mode 100644
index 860e0e4..000
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java
+++ /dev/null
@@ -1,1794 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.protobuf;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.regex.Pattern;
-
-import org.apache.hadoop.hbase.CellScannable;
-import org.apache.hadoop.hbase.DoNotRetryIOException;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.client.Action;
-import org.apache.hadoop.hbase.client.Append;
-import org.apache.hadoop.hbase.client.Delete;
-import org.apache.hadoop.hbase.client.Durability;
-import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.Increment;
-import org.apache.hadoop.hbase.client.MasterSwitchType;
-import org.apache.hadoop.hbase.client.Mutation;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.client.RegionCoprocessorServiceExec;
-import org.apache.hadoop.hbase.client.Row;
-import org.apache.hadoop.hbase.client.RowMutations;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.exceptions.DeserializationException;
-import org.apache.hadoop.hbase.filter.ByteArrayComparable;
-import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos;
-import 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionRequest;
-import 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactRegionRequest;
-import 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.FlushRegionRequest;
-import 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetOnlineRegionRequest;
-import 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoRequest;
-import 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoRequest;
-import 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileRequest;
-import 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.MergeRegionsRequest;
-import 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest;
-import 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo;
-import 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterRequest;
-import 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.SplitRegionRequest;
-import 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerRequest;
-import 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest;
-import 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.RegionUpdateInfo;
-import 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionRequest;
-import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
-import 
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileRequest;
-import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Condition;
-import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetRequest;
-import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutateRequest;
-import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto;
-import 
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue;
-import 
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue.QualifierValue;
-import 
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.MutationType;

[07/77] [abbrv] [partial] hbase git commit: HBASE-15638 Shade protobuf Which includes

2016-10-07 Thread syuanjiang
http://git-wip-us.apache.org/repos/asf/hbase/blob/95c1dc93/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/MessageLiteToString.java
--
diff --git 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/MessageLiteToString.java
 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/MessageLiteToString.java
new file mode 100644
index 000..51ff83a
--- /dev/null
+++ 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/MessageLiteToString.java
@@ -0,0 +1,239 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package org.apache.hadoop.hbase.shaded.com.google.protobuf;
+
+import java.lang.reflect.Method;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.TreeSet;
+
+/**
+ * Helps generate {@link String} representations of {@link MessageLite} protos.
+ */
+// TODO(dweis): Fix map fields.
+final class MessageLiteToString {
+
+  private static final String LIST_SUFFIX = "List";
+  private static final String BUILDER_LIST_SUFFIX = "OrBuilderList";
+  private static final String BYTES_SUFFIX = "Bytes";
+  
+  /**
+   * Returns a {@link String} representation of the {@link MessageLite} object. The first line of
+   * the {@code String} representation includes a comment string to uniquely identify
+   * the object instance. This acts as an indicator that this should not be relied on for
+   * comparisons.
+   *
+   * For use by generated code only.
+   */
+  static String toString(MessageLite messageLite, String commentString) {
+StringBuilder buffer = new StringBuilder();
+buffer.append("# ").append(commentString);
+reflectivePrintWithIndent(messageLite, buffer, 0);
+return buffer.toString();
+  }
+
+  /**
+   * Reflectively prints the {@link MessageLite} to the buffer at given {@code indent} level.
+   *
+   * @param buffer the buffer to write to
+   * @param indent the number of spaces to indent the proto by
+   */
+  private static void reflectivePrintWithIndent(
+  MessageLite messageLite, StringBuilder buffer, int indent) {
+// Build a map of method name to method. We're looking for methods like getFoo(), hasFoo(), and
+// getFooList() which might be useful for building an object's string representation.
+Map<String, Method> nameToNoArgMethod = new HashMap<String, Method>();
+Map<String, Method> nameToMethod = new HashMap<String, Method>();
+Set<String> getters = new TreeSet<String>();
+for (Method method : messageLite.getClass().getDeclaredMethods()) {
+  nameToMethod.put(method.getName(), method);
+  if (method.getParameterTypes().length == 0) {
+nameToNoArgMethod.put(method.getName(), method);
+
+if (method.getName().startsWith("get")) {
+  getters.add(method.getName());
+}
+  }
+}
+
+for (String getter : getters) {
+  String suffix = getter.replaceFirst("get", "");
+  if (suffix.endsWith(LIST_SUFFIX) && !suffix.endsWith(BUILDER_LIST_SUFFIX)) {
+String camelCase = suffix.substring(0, 1).toLowerCase()
++ suffix.substring(1, suffix.length() - LIST_SUFFIX.length());
+   

[36/77] [abbrv] [partial] hbase git commit: HBASE-15638 Shade protobuf Which includes

2016-10-07 Thread syuanjiang
http://git-wip-us.apache.org/repos/asf/hbase/blob/95c1dc93/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/coprocessor/protobuf/generated/ColumnAggregationWithNullResponseProtos.java
--
diff --git 
a/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/coprocessor/protobuf/generated/ColumnAggregationWithNullResponseProtos.java
 
b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/coprocessor/protobuf/generated/ColumnAggregationWithNullResponseProtos.java
new file mode 100644
index 000..b25f7aa
--- /dev/null
+++ 
b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/coprocessor/protobuf/generated/ColumnAggregationWithNullResponseProtos.java
@@ -0,0 +1,1283 @@
+// Generated by the protocol buffer compiler.  DO NOT EDIT!
+// source: ColumnAggregationNullResponseProtocol.proto
+
+package org.apache.hadoop.hbase.coprocessor.protobuf.generated;
+
+public final class ColumnAggregationWithNullResponseProtos {
+  private ColumnAggregationWithNullResponseProtos() {}
+  public static void registerAllExtensions(
+  com.google.protobuf.ExtensionRegistry registry) {
+  }
+  public interface ColumnAggregationNullResponseSumRequestOrBuilder
+  extends com.google.protobuf.MessageOrBuilder {
+
+// required bytes family = 1;
+/**
+ * required bytes family = 1;
+ */
+boolean hasFamily();
+/**
+ * required bytes family = 1;
+ */
+com.google.protobuf.ByteString getFamily();
+
+// optional bytes qualifier = 2;
+/**
+ * optional bytes qualifier = 2;
+ */
+boolean hasQualifier();
+/**
+ * optional bytes qualifier = 2;
+ */
+com.google.protobuf.ByteString getQualifier();
+  }
+  /**
+   * Protobuf type {@code ColumnAggregationNullResponseSumRequest}
+   *
+   * 
+   * use unique names for messages in ColumnAggregationXXX.protos due to a bug in
+   * protoc or hadoop's protoc compiler.
+   * 
+   */
+  public static final class ColumnAggregationNullResponseSumRequest extends
+  com.google.protobuf.GeneratedMessage
+  implements ColumnAggregationNullResponseSumRequestOrBuilder {
+// Use ColumnAggregationNullResponseSumRequest.newBuilder() to construct.
+private ColumnAggregationNullResponseSumRequest(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+  super(builder);
+  this.unknownFields = builder.getUnknownFields();
+}
+private ColumnAggregationNullResponseSumRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+private static final ColumnAggregationNullResponseSumRequest defaultInstance;
+public static ColumnAggregationNullResponseSumRequest getDefaultInstance() {
+  return defaultInstance;
+}
+
+public ColumnAggregationNullResponseSumRequest getDefaultInstanceForType() {
+  return defaultInstance;
+}
+
+private final com.google.protobuf.UnknownFieldSet unknownFields;
+@java.lang.Override
+public final com.google.protobuf.UnknownFieldSet
+getUnknownFields() {
+  return this.unknownFields;
+}
+private ColumnAggregationNullResponseSumRequest(
+com.google.protobuf.CodedInputStream input,
+com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+throws com.google.protobuf.InvalidProtocolBufferException {
+  initFields();
+  int mutable_bitField0_ = 0;
+  com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+  com.google.protobuf.UnknownFieldSet.newBuilder();
+  try {
+boolean done = false;
+while (!done) {
+  int tag = input.readTag();
+  switch (tag) {
+case 0:
+  done = true;
+  break;
+default: {
+  if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+done = true;
+  }
+  break;
+}
+case 10: {
+  bitField0_ |= 0x0001;
+  family_ = input.readBytes();
+  break;
+}
+case 18: {
+  bitField0_ |= 0x0002;
+  qualifier_ = input.readBytes();
+  break;
+}
+  }
+}
+  } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+throw e.setUnfinishedMessage(this);
+  } catch (java.io.IOException e) {
+throw new com.google.protobuf.InvalidProtocolBufferException(
+e.getMessage()).setUnfinishedMessage(this);
+  } finally {
+this.unknownFields = unknownFields.build();
+makeExtensionsImmutable();
+  }
+}
+public static final com.google.protobuf.Descriptors.Descriptor
+getDescriptor() {
+  return 

[05/77] [abbrv] [partial] hbase git commit: HBASE-15638 Shade protobuf Which includes

2016-10-07 Thread syuanjiang
http://git-wip-us.apache.org/repos/asf/hbase/blob/95c1dc93/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/MixinOrBuilder.java
--
diff --git 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/MixinOrBuilder.java
 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/MixinOrBuilder.java
new file mode 100644
index 000..8d9311f
--- /dev/null
+++ 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/MixinOrBuilder.java
@@ -0,0 +1,47 @@
+// Generated by the protocol buffer compiler.  DO NOT EDIT!
+// source: google/protobuf/api.proto
+
+package org.apache.hadoop.hbase.shaded.com.google.protobuf;
+
+public interface MixinOrBuilder extends
+// @@protoc_insertion_point(interface_extends:google.protobuf.Mixin)
+org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder {
+
+  /**
+   * 
+   * The fully qualified name of the API which is included.
+   * 
+   *
+   * optional string name = 1;
+   */
+  java.lang.String getName();
+  /**
+   * 
+   * The fully qualified name of the API which is included.
+   * 
+   *
+   * optional string name = 1;
+   */
+  org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString
+  getNameBytes();
+
+  /**
+   * 
+   * If non-empty specifies a path under which inherited HTTP paths
+   * are rooted.
+   * 
+   *
+   * optional string root = 2;
+   */
+  java.lang.String getRoot();
+  /**
+   * 
+   * If non-empty specifies a path under which inherited HTTP paths
+   * are rooted.
+   * 
+   *
+   * optional string root = 2;
+   */
+  org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString
+  getRootBytes();
+}

http://git-wip-us.apache.org/repos/asf/hbase/blob/95c1dc93/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/MutabilityOracle.java
--
diff --git 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/MutabilityOracle.java
 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/MutabilityOracle.java
new file mode 100644
index 000..b1eca4f
--- /dev/null
+++ 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/MutabilityOracle.java
@@ -0,0 +1,48 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package org.apache.hadoop.hbase.shaded.com.google.protobuf;
+
+/**
+ * Verifies that an object is mutable, throwing if not.
+ */
+interface MutabilityOracle {
+  static final MutabilityOracle IMMUTABLE = new MutabilityOracle() {
+@Override
+public void ensureMutable() {
+  throw new UnsupportedOperationException();
+}
+  };
+
+  /**
+   * Throws an {@link UnsupportedOperationException} if not mutable.
+   */
+  void ensureMutable();
+}
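
As an aside, not in the patch: the oracle above is consulted by the internal map and list wrappers before any write, and the IMMUTABLE instance turns writes into UnsupportedOperationException. A self-contained sketch of that guard pattern, using a local stand-in for the package-private interface, could look like:

import java.util.ArrayList;
import java.util.List;

public class MutabilityGuardSketch {
  // Local stand-in for the package-private MutabilityOracle shown above.
  interface Oracle { void ensureMutable(); }

  static final Oracle IMMUTABLE = () -> { throw new UnsupportedOperationException(); };
  static final Oracle MUTABLE = () -> { /* writes allowed */ };

  // A wrapper that asks the oracle before every mutation.
  static class GuardedList<E> {
    private final List<E> delegate = new ArrayList<>();
    private Oracle oracle = MUTABLE;

    void add(E e) { oracle.ensureMutable(); delegate.add(e); }
    void freeze() { oracle = IMMUTABLE; }
    int size() { return delegate.size(); }
  }

  public static void main(String[] args) {
    GuardedList<String> list = new GuardedList<>();
    list.add("ok");
    list.freeze();
    try {
      list.add("rejected");
    } catch (UnsupportedOperationException expected) {
      System.out.println("mutation blocked after freeze, size=" + list.size());
    }
  }
}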

http://git-wip-us.apache.org/repos/asf/hbase/blob/95c1dc93/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/NioByteString.java
--
diff --git 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/NioByteString.java
 

[21/77] [abbrv] [partial] hbase git commit: HBASE-15638 Shade protobuf Which includes

2016-10-07 Thread syuanjiang
http://git-wip-us.apache.org/repos/asf/hbase/blob/95c1dc93/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/DescriptorProtos.java
--
diff --git 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/DescriptorProtos.java
 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/DescriptorProtos.java
new file mode 100644
index 000..99dfec2
--- /dev/null
+++ 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/DescriptorProtos.java
@@ -0,0 +1,38670 @@
+// Generated by the protocol buffer compiler.  DO NOT EDIT!
+// source: google/protobuf/descriptor.proto
+
+package org.apache.hadoop.hbase.shaded.com.google.protobuf;
+
+public final class DescriptorProtos {
+  private DescriptorProtos() {}
+  public static void registerAllExtensions(
+  org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite 
registry) {
+  }
+
+  public static void registerAllExtensions(
+  org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistry 
registry) {
+registerAllExtensions(
+
(org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite) 
registry);
+  }
+  public interface FileDescriptorSetOrBuilder extends
+  // 
@@protoc_insertion_point(interface_extends:google.protobuf.FileDescriptorSet)
+  org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder {
+
+/**
+ * repeated .google.protobuf.FileDescriptorProto file = 1;
+ */
+java.util.List<org.apache.hadoop.hbase.shaded.com.google.protobuf.DescriptorProtos.FileDescriptorProto>
+getFileList();
+/**
+ * repeated .google.protobuf.FileDescriptorProto file = 1;
+ */
+org.apache.hadoop.hbase.shaded.com.google.protobuf.DescriptorProtos.FileDescriptorProto getFile(int index);
+/**
+ * repeated .google.protobuf.FileDescriptorProto file = 1;
+ */
+int getFileCount();
+/**
+ * repeated .google.protobuf.FileDescriptorProto file = 1;
+ */
+java.util.List<? extends org.apache.hadoop.hbase.shaded.com.google.protobuf.DescriptorProtos.FileDescriptorProtoOrBuilder>
+getFileOrBuilderList();
+/**
+ * repeated .google.protobuf.FileDescriptorProto file = 1;
+ */
+org.apache.hadoop.hbase.shaded.com.google.protobuf.DescriptorProtos.FileDescriptorProtoOrBuilder getFileOrBuilder(
+int index);
+  }
+  /**
+   * 
+   * The protocol compiler can output a FileDescriptorSet containing the .proto
+   * files it parses.
+   * 
+   *
+   * Protobuf type {@code google.protobuf.FileDescriptorSet}
+   */
+  public  static final class FileDescriptorSet extends
+  org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 
implements
+  // 
@@protoc_insertion_point(message_implements:google.protobuf.FileDescriptorSet)
+  FileDescriptorSetOrBuilder {
+// Use FileDescriptorSet.newBuilder() to construct.
+private FileDescriptorSet(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
+  super(builder);
+}
+private FileDescriptorSet() {
+  file_ = java.util.Collections.emptyList();
+}
+
+@java.lang.Override
+public final 
org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet
+getUnknownFields() {
+  return this.unknownFields;
+}
+private FileDescriptorSet(
+org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream 
input,
+
org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite 
extensionRegistry)
+throws 
org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException
 {
+  this();
+  int mutable_bitField0_ = 0;
+  
org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.Builder 
unknownFields =
+  
org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.newBuilder();
+  try {
+boolean done = false;
+while (!done) {
+  int tag = input.readTag();
+  switch (tag) {
+case 0:
+  done = true;
+  break;
+default: {
+  if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+done = true;
+  }
+  break;
+}
+case 10: {
+  if (!((mutable_bitField0_ & 0x0001) == 0x0001)) {
+file_ = new java.util.ArrayList<org.apache.hadoop.hbase.shaded.com.google.protobuf.DescriptorProtos.FileDescriptorProto>();
+mutable_bitField0_ |= 0x0001;
+  }
+  file_.add(
+  
input.readMessage(org.apache.hadoop.hbase.shaded.com.google.protobuf.DescriptorProtos.FileDescriptorProto.PARSER,
 extensionRegistry));
+  break;
+}
+  }
+}
+  } catch 
(org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException
 e) {
+throw e.setUnfinishedMessage(this);
+  } catch (java.io.IOException e) {
+throw new 
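
Not part of the commit, but for orientation: FileDescriptorSet is the message protoc writes with --descriptor_set_out, so a small sketch of consuming such a file with the shaded classes (the file name here is hypothetical) would be:

import java.io.FileInputStream;
import java.io.InputStream;
import org.apache.hadoop.hbase.shaded.com.google.protobuf.DescriptorProtos;

public class DescriptorSetSketch {
  public static void main(String[] args) throws Exception {
    // Hypothetical path to a file produced by: protoc --descriptor_set_out=descriptors.pb ...
    try (InputStream in = new FileInputStream("descriptors.pb")) {
      DescriptorProtos.FileDescriptorSet set = DescriptorProtos.FileDescriptorSet.parseFrom(in);
      for (DescriptorProtos.FileDescriptorProto file : set.getFileList()) {
        System.out.println(file.getName() + " (" + file.getMessageTypeCount() + " messages)");
      }
    }
  }
}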

[18/77] [abbrv] [partial] hbase git commit: HBASE-15638 Shade protobuf Which includes

2016-10-07 Thread syuanjiang
http://git-wip-us.apache.org/repos/asf/hbase/blob/95c1dc93/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/Empty.java
--
diff --git 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/Empty.java
 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/Empty.java
new file mode 100644
index 000..de6d523
--- /dev/null
+++ 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/Empty.java
@@ -0,0 +1,386 @@
+// Generated by the protocol buffer compiler.  DO NOT EDIT!
+// source: google/protobuf/empty.proto
+
+package org.apache.hadoop.hbase.shaded.com.google.protobuf;
+
+/**
+ * 
+ * A generic empty message that you can re-use to avoid defining duplicated
+ * empty messages in your APIs. A typical example is to use it as the request
+ * or the response type of an API method. For instance:
+ * service Foo {
+ *   rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);
+ * }
+ * The JSON representation for `Empty` is empty JSON object `{}`.
+ * 
+ *
+ * Protobuf type {@code google.protobuf.Empty}
+ */
+public  final class Empty extends
+org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 
implements
+// @@protoc_insertion_point(message_implements:google.protobuf.Empty)
+EmptyOrBuilder {
+  // Use Empty.newBuilder() to construct.
+  private Empty(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
+super(builder);
+  }
+  private Empty() {
+  }
+
+  @java.lang.Override
+  public final 
org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet
+  getUnknownFields() {
+return 
org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.getDefaultInstance();
+  }
+  private Empty(
+  org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream 
input,
+  org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite 
extensionRegistry)
+  throws 
org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException
 {
+this();
+try {
+  boolean done = false;
+  while (!done) {
+int tag = input.readTag();
+switch (tag) {
+  case 0:
+done = true;
+break;
+  default: {
+if (!input.skipField(tag)) {
+  done = true;
+}
+break;
+  }
+}
+  }
+} catch 
(org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException
 e) {
+  throw e.setUnfinishedMessage(this);
+} catch (java.io.IOException e) {
+  throw new 
org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException(
+  e).setUnfinishedMessage(this);
+} finally {
+  makeExtensionsImmutable();
+}
+  }
+  public static final 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+  getDescriptor() {
+return 
org.apache.hadoop.hbase.shaded.com.google.protobuf.EmptyProto.internal_static_google_protobuf_Empty_descriptor;
+  }
+
+  protected 
org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+  internalGetFieldAccessorTable() {
+return 
org.apache.hadoop.hbase.shaded.com.google.protobuf.EmptyProto.internal_static_google_protobuf_Empty_fieldAccessorTable
+.ensureFieldAccessorsInitialized(
+org.apache.hadoop.hbase.shaded.com.google.protobuf.Empty.class, 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Empty.Builder.class);
+  }
+
+  private byte memoizedIsInitialized = -1;
+  public final boolean isInitialized() {
+byte isInitialized = memoizedIsInitialized;
+if (isInitialized == 1) return true;
+if (isInitialized == 0) return false;
+
+memoizedIsInitialized = 1;
+return true;
+  }
+
+  public void 
writeTo(org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream 
output)
+  throws java.io.IOException {
+  }
+
+  public int getSerializedSize() {
+int size = memoizedSize;
+if (size != -1) return size;
+
+size = 0;
+memoizedSize = size;
+return size;
+  }
+
+  private static final long serialVersionUID = 0L;
+  @java.lang.Override
+  public boolean equals(final java.lang.Object obj) {
+if (obj == this) {
+ return true;
+}
+if (!(obj instanceof 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Empty)) {
+  return super.equals(obj);
+}
+org.apache.hadoop.hbase.shaded.com.google.protobuf.Empty other = 
(org.apache.hadoop.hbase.shaded.com.google.protobuf.Empty) obj;
+
+boolean result = true;
+return result;
+  }
+
+  @java.lang.Override
+  public int hashCode() {
+if (memoizedHashCode != 0) {
+  return memoizedHashCode;
+}
+int hash = 41;
+hash = (19 * hash) + 
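
A trivial sketch, not from the patch, of what the javadoc above means in practice: Empty carries no fields, so it serializes to zero bytes and every instance compares equal.

import org.apache.hadoop.hbase.shaded.com.google.protobuf.Empty;

public class EmptySketch {
  public static void main(String[] args) throws Exception {
    Empty a = Empty.getDefaultInstance();
    Empty b = Empty.parseFrom(new byte[0]);   // zero bytes is a valid encoding

    System.out.println(a.getSerializedSize()); // 0
    System.out.println(a.equals(b));           // true: there is nothing to differ on
  }
}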

[11/77] [abbrv] [partial] hbase git commit: HBASE-15638 Shade protobuf Which includes

2016-10-07 Thread syuanjiang
http://git-wip-us.apache.org/repos/asf/hbase/blob/95c1dc93/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/GeneratedMessageV3.java
--
diff --git 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/GeneratedMessageV3.java
 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/GeneratedMessageV3.java
new file mode 100644
index 000..fe92e6f
--- /dev/null
+++ 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/GeneratedMessageV3.java
@@ -0,0 +1,2840 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package org.apache.hadoop.hbase.shaded.com.google.protobuf;
+
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor;
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.EnumDescriptor;
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.EnumValueDescriptor;
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor;
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor;
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.OneofDescriptor;
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessage.GeneratedExtension;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.ObjectStreamException;
+import java.io.Serializable;
+import java.lang.reflect.InvocationTargetException;
+import java.lang.reflect.Method;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.TreeMap;
+
+/**
+ * All generated protocol message classes extend this class.  This class
+ * implements most of the Message and Builder interfaces using Java reflection.
+ * Users can ignore this class and pretend that generated messages implement
+ * the Message interface directly.
+ *
+ * @author ken...@google.com Kenton Varda
+ */
+public abstract class GeneratedMessageV3 extends AbstractMessage
+implements Serializable {
+  private static final long serialVersionUID = 1L;
+
+  /**
+   * For testing. Allows a test to disable the optimization that avoids using
+   * field builders for nested messages until they are requested. By disabling
+   * this optimization, existing tests can be reused to test the field 
builders.
+   */
+  protected static boolean alwaysUseFieldBuilders = false;
+
+  /** For use by generated code only.  */
+  protected UnknownFieldSet unknownFields;
+
+  protected GeneratedMessageV3() {
+unknownFields = UnknownFieldSet.getDefaultInstance();
+  }
+
+  protected GeneratedMessageV3(Builder builder) {
+unknownFields = builder.getUnknownFields();
+  }
+
+  @Override
+  public Parser<? extends GeneratedMessageV3> getParserForType() {
+throw new UnsupportedOperationException(
+"This is supposed to be overridden by subclasses.");
+  }
+
+ /**
+  * For testing. Allows a test to disable the optimization that avoids using
+  * field builders for nested messages until they are requested. By disabling
+  * this optimization, existing tests can be reused to test the field builders.
+  * See {@link 

[04/77] [abbrv] [partial] hbase git commit: HBASE-15638 Shade protobuf Which includes

2016-10-07 Thread syuanjiang
http://git-wip-us.apache.org/repos/asf/hbase/blob/95c1dc93/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/RepeatedFieldBuilder.java
--
diff --git 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/RepeatedFieldBuilder.java
 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/RepeatedFieldBuilder.java
new file mode 100644
index 000..8968a70
--- /dev/null
+++ 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/RepeatedFieldBuilder.java
@@ -0,0 +1,708 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package org.apache.hadoop.hbase.shaded.com.google.protobuf;
+
+import java.util.AbstractList;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.List;
+
+/**
+ * {@code RepeatedFieldBuilder} implements a structure that a protocol
+ * message uses to hold a repeated field of other protocol messages. It 
supports
+ * the classical use case of adding immutable {@link Message}'s to the
+ * repeated field and is highly optimized around this (no extra memory
+ * allocations and sharing of immutable arrays).
+ * 
+ * It also supports the additional use case of adding a {@link Message.Builder}
+ * to the repeated field and deferring conversion of that {@code Builder}
+ * to an immutable {@code Message}. In this way, it's possible to maintain
+ * a tree of {@code Builder}'s that acts as a fully read/write data
+ * structure.
+ * 
+ * Logically, one can think of a tree of builders as converting the entire tree
+ * to messages when build is called on the root or when any method is called
+ * that desires a Message instead of a Builder. In terms of the implementation,
+ * the {@code SingleFieldBuilder} and {@code RepeatedFieldBuilder}
+ * classes cache messages that were created so that messages only need to be
+ * created when some change occurred in its builder or a builder for one of its
+ * descendants.
+ *
+ * @param <MType> the type of message for the field
+ * @param <BType> the type of builder for the field
+ * @param <IType> the common interface for the message and the builder
+ *
+ * @author j...@google.com (Jon Perlow)
+ */
+public class RepeatedFieldBuilder
+<MType extends GeneratedMessage, BType extends GeneratedMessage.Builder, IType extends MessageOrBuilder>
+implements GeneratedMessage.BuilderParent {
+
+  // Parent to send changes to.
+  private GeneratedMessage.BuilderParent parent;
+
+  // List of messages. Never null. It may be immutable, in which case
+  // isMessagesListMutable will be false. See note below.
+  private List<MType> messages;
+
+  // Whether messages is a mutable array that can be modified.
+  private boolean isMessagesListMutable;
+
+  // List of builders. May be null, in which case, no nested builders were
+  // created. If not null, entries represent the builder for that index.
+  private List<SingleFieldBuilder<MType, BType, IType>> builders;
+
+  // Here are the invariants for messages and builders:
+  // 1. messages is never null and its count corresponds to the number of items
+  //in the repeated field.
+  // 2. If builders is non-null, messages and builders MUST always
+  //contain the same number of items.
+  // 3. Entries in either array can be null, but for any index, there MUST be
+  //either 
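
To make the builder-tree idea above concrete (a sketch, not part of the patch): the generated builders for repeated message fields, such as the shaded DescriptorProtos shown earlier in this series, delegate to exactly this kind of field builder, so nested builders stay writable until build() is called at the root.

import org.apache.hadoop.hbase.shaded.com.google.protobuf.DescriptorProtos;

public class BuilderTreeSketch {
  public static void main(String[] args) {
    DescriptorProtos.FileDescriptorSet.Builder root = DescriptorProtos.FileDescriptorSet.newBuilder();

    // addFileBuilder() hands back a nested builder that stays attached to the root.
    root.addFileBuilder().setName("a.proto");
    root.addFileBuilder().setName("b.proto");

    // The nested builder can still be edited in place; the cached message is invalidated.
    root.getFileBuilder(0).setPackage("example");

    DescriptorProtos.FileDescriptorSet set = root.build();
    System.out.println(set.getFileCount() + " " + set.getFile(0).getPackage()); // 2 example
  }
}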

[02/77] [abbrv] [partial] hbase git commit: HBASE-15638 Shade protobuf Which includes

2016-10-07 Thread syuanjiang
http://git-wip-us.apache.org/repos/asf/hbase/blob/95c1dc93/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/StringValue.java
--
diff --git 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/StringValue.java
 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/StringValue.java
new file mode 100644
index 000..70f4acf
--- /dev/null
+++ 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/StringValue.java
@@ -0,0 +1,532 @@
+// Generated by the protocol buffer compiler.  DO NOT EDIT!
+// source: google/protobuf/wrappers.proto
+
+package org.apache.hadoop.hbase.shaded.com.google.protobuf;
+
+/**
+ * 
+ * Wrapper message for `string`.
+ * The JSON representation for `StringValue` is JSON string.
+ * 
+ *
+ * Protobuf type {@code google.protobuf.StringValue}
+ */
+public  final class StringValue extends
+org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 
implements
+// @@protoc_insertion_point(message_implements:google.protobuf.StringValue)
+StringValueOrBuilder {
+  // Use StringValue.newBuilder() to construct.
+  private StringValue(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
+super(builder);
+  }
+  private StringValue() {
+value_ = "";
+  }
+
+  @java.lang.Override
+  public final 
org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet
+  getUnknownFields() {
+return 
org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.getDefaultInstance();
+  }
+  private StringValue(
+  org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream 
input,
+  org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite 
extensionRegistry)
+  throws 
org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException
 {
+this();
+int mutable_bitField0_ = 0;
+try {
+  boolean done = false;
+  while (!done) {
+int tag = input.readTag();
+switch (tag) {
+  case 0:
+done = true;
+break;
+  default: {
+if (!input.skipField(tag)) {
+  done = true;
+}
+break;
+  }
+  case 10: {
+java.lang.String s = input.readStringRequireUtf8();
+
+value_ = s;
+break;
+  }
+}
+  }
+} catch 
(org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException
 e) {
+  throw e.setUnfinishedMessage(this);
+} catch (java.io.IOException e) {
+  throw new 
org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException(
+  e).setUnfinishedMessage(this);
+} finally {
+  makeExtensionsImmutable();
+}
+  }
+  public static final 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+  getDescriptor() {
+return 
org.apache.hadoop.hbase.shaded.com.google.protobuf.WrappersProto.internal_static_google_protobuf_StringValue_descriptor;
+  }
+
+  protected 
org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+  internalGetFieldAccessorTable() {
+return 
org.apache.hadoop.hbase.shaded.com.google.protobuf.WrappersProto.internal_static_google_protobuf_StringValue_fieldAccessorTable
+.ensureFieldAccessorsInitialized(
+
org.apache.hadoop.hbase.shaded.com.google.protobuf.StringValue.class, 
org.apache.hadoop.hbase.shaded.com.google.protobuf.StringValue.Builder.class);
+  }
+
+  public static final int VALUE_FIELD_NUMBER = 1;
+  private volatile java.lang.Object value_;
+  /**
+   * 
+   * The string value.
+   * 
+   *
+   * optional string value = 1;
+   */
+  public java.lang.String getValue() {
+java.lang.Object ref = value_;
+if (ref instanceof java.lang.String) {
+  return (java.lang.String) ref;
+} else {
+  org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString bs = 
+  (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) ref;
+  java.lang.String s = bs.toStringUtf8();
+  value_ = s;
+  return s;
+}
+  }
+  /**
+   * 
+   * The string value.
+   * 
+   *
+   * optional string value = 1;
+   */
+  public org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString
+  getValueBytes() {
+java.lang.Object ref = value_;
+if (ref instanceof java.lang.String) {
+  org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString b = 
+  
org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString.copyFromUtf8(
+  (java.lang.String) ref);
+  value_ = b;
+  return b;
+} else {
+  return (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) 
ref;
+}
+  }
+
+  private byte memoizedIsInitialized = -1;
+  public final 
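
For context only, not in the patch: the getValue()/getValueBytes() pair above lazily swaps between String and ByteString representations; a minimal usage sketch of the wrapper is:

import org.apache.hadoop.hbase.shaded.com.google.protobuf.StringValue;

public class StringValueSketch {
  public static void main(String[] args) throws Exception {
    StringValue wrapped = StringValue.newBuilder().setValue("hello").build();

    // Round-trip through the wire format; field 1 is the wrapped string.
    StringValue parsed = StringValue.parseFrom(wrapped.toByteArray());

    System.out.println(parsed.getValue());                     // hello (cached as a String)
    System.out.println(parsed.getValueBytes().toStringUtf8()); // hello (UTF-8 ByteString view)
  }
}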

[22/77] [abbrv] [partial] hbase git commit: HBASE-15638 Shade protobuf Which includes

2016-10-07 Thread syuanjiang
http://git-wip-us.apache.org/repos/asf/hbase/blob/95c1dc93/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/CodedOutputStream.java
--
diff --git 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/CodedOutputStream.java
 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/CodedOutputStream.java
new file mode 100644
index 000..03871c9
--- /dev/null
+++ 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/CodedOutputStream.java
@@ -0,0 +1,3001 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package org.apache.hadoop.hbase.shaded.com.google.protobuf;
+
+import static org.apache.hadoop.hbase.shaded.com.google.protobuf.WireFormat.FIXED_32_SIZE;
+import static org.apache.hadoop.hbase.shaded.com.google.protobuf.WireFormat.FIXED_64_SIZE;
+import static org.apache.hadoop.hbase.shaded.com.google.protobuf.WireFormat.MAX_VARINT_SIZE;
+import static java.lang.Math.max;
+
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.Utf8.UnpairedSurrogateException;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.nio.BufferOverflowException;
+import java.nio.ByteBuffer;
+import java.nio.ByteOrder;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+/**
+ * Encodes and writes protocol message fields.
+ *
+ * This class contains two kinds of methods:  methods that write specific
+ * protocol message constructs and field types (e.g. {@link #writeTag} and
+ * {@link #writeInt32}) and methods that write low-level values (e.g.
+ * {@link #writeRawVarint32} and {@link #writeRawBytes}).  If you are
+ * writing encoded protocol messages, you should use the former methods, but if
+ * you are writing some other format of your own design, use the latter.
+ *
+ * This class is totally unsynchronized.
+ */
+public abstract class CodedOutputStream extends ByteOutput {
+  private static final Logger logger = Logger.getLogger(CodedOutputStream.class.getName());
+  private static final boolean HAS_UNSAFE_ARRAY_OPERATIONS = UnsafeUtil.hasUnsafeArrayOperations();
+  private static final long ARRAY_BASE_OFFSET = UnsafeUtil.getArrayBaseOffset();
+
+  /**
+   * @deprecated Use {@link #computeFixed32SizeNoTag(int)} instead.
+   */
+  @Deprecated
+  public static final int LITTLE_ENDIAN_32_SIZE = FIXED_32_SIZE;
+
+  /**
+   * The buffer size used in {@link #newInstance(OutputStream)}.
+   */
+  public static final int DEFAULT_BUFFER_SIZE = 4096;
+
+  /**
+   * Returns the buffer size to efficiently write dataLength bytes to this
+   * CodedOutputStream. Used by AbstractMessageLite.
+   *
+   * @return the buffer size to efficiently write dataLength bytes to this
+   * CodedOutputStream.
+   */
+  static int computePreferredBufferSize(int dataLength) {
+if (dataLength > DEFAULT_BUFFER_SIZE) {
+  return DEFAULT_BUFFER_SIZE;
+}
+return dataLength;
+  }
+
+  /**
+   * Create a new {@code CodedOutputStream} wrapping the given {@code OutputStream}.
+   *
+   * NOTE: The provided {@link OutputStream} MUST NOT retain access or
+   * modify the provided byte arrays. Doing so may result in corrupted data, which would be
+   * difficult to debug.
+   
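
Again as a sketch outside the patch, mirroring the CodedInputStream example earlier: writing a tagged varint field with the shaded CodedOutputStream and reading it back.

import java.util.Arrays;
import org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream;
import org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream;

public class CodedOutputStreamSketch {
  public static void main(String[] args) throws Exception {
    byte[] buffer = new byte[16];
    CodedOutputStream out = CodedOutputStream.newInstance(buffer);
    out.writeInt32(1, 300);   // field number 1, varint value 300
    out.flush();

    byte[] wire = Arrays.copyOf(buffer, out.getTotalBytesWritten());

    CodedInputStream in = CodedInputStream.newInstance(wire);
    int tag = in.readTag();     // (1 << 3) | 0 = 8
    int value = in.readInt32(); // 300
    System.out.println(tag + " " + value);
  }
}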

[15/77] [abbrv] [partial] hbase git commit: HBASE-15638 Shade protobuf Which includes

2016-10-07 Thread syuanjiang
http://git-wip-us.apache.org/repos/asf/hbase/blob/95c1dc93/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/FieldMask.java
--
diff --git 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/FieldMask.java
 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/FieldMask.java
new file mode 100644
index 000..9c311e4
--- /dev/null
+++ 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/FieldMask.java
@@ -0,0 +1,903 @@
+// Generated by the protocol buffer compiler.  DO NOT EDIT!
+// source: google/protobuf/field_mask.proto
+
+package org.apache.hadoop.hbase.shaded.com.google.protobuf;
+
+/**
+ * 
+ * `FieldMask` represents a set of symbolic field paths, for example:
+ * paths: "f.a"
+ * paths: "f.b.d"
+ * Here `f` represents a field in some root message, `a` and `b`
+ * fields in the message found in `f`, and `d` a field found in the
+ * message in `f.b`.
+ * Field masks are used to specify a subset of fields that should be
+ * returned by a get operation or modified by an update operation.
+ * Field masks also have a custom JSON encoding (see below).
+ * # Field Masks in Projections
+ * When used in the context of a projection, a response message or
+ * sub-message is filtered by the API to only contain those fields as
+ * specified in the mask. For example, if the mask in the previous
+ * example is applied to a response message as follows:
+ * f {
+ *   a : 22
+ *   b {
+ * d : 1
+ * x : 2
+ *   }
+ *   y : 13
+ * }
+ * z: 8
+ * The result will not contain specific values for fields x,y and z
+ * (their value will be set to the default, and omitted in proto text
+ * output):
+ * f {
+ *   a : 22
+ *   b {
+ * d : 1
+ *   }
+ * }
+ * A repeated field is not allowed except at the last position of a
+ * paths string.
+ * If a FieldMask object is not present in a get operation, the
+ * operation applies to all fields (as if a FieldMask of all fields
+ * had been specified).
+ * Note that a field mask does not necessarily apply to the
+ * top-level response message. In case of a REST get operation, the
+ * field mask applies directly to the response, but in case of a REST
+ * list operation, the mask instead applies to each individual message
+ * in the returned resource list. In case of a REST custom method,
+ * other definitions may be used. Where the mask applies will be
+ * clearly documented together with its declaration in the API.  In
+ * any case, the effect on the returned resource/resources is required
+ * behavior for APIs.
+ * # Field Masks in Update Operations
+ * A field mask in update operations specifies which fields of the
+ * targeted resource are going to be updated. The API is required
+ * to only change the values of the fields as specified in the mask
+ * and leave the others untouched. If a resource is passed in to
+ * describe the updated values, the API ignores the values of all
+ * fields not covered by the mask.
+ * If a repeated field is specified for an update operation, the existing
+ * repeated values in the target resource will be overwritten by the new values.
+ * Note that a repeated field is only allowed in the last position of a `paths`
+ * string.
+ * If a sub-message is specified in the last position of the field mask for an
+ * update operation, then the existing sub-message in the target resource is
+ * overwritten. Given the target message:
+ * f {
+ *   b {
+ * d : 1
+ * x : 2
+ *   }
+ *   c : 1
+ * }
+ * And an update message:
+ * f {
+ *   b {
+ * d : 10
+ *   }
+ * }
+ * then if the field mask is:
+ *  paths: "f.b"
+ * then the result will be:
+ * f {
+ *   b {
+ * d : 10
+ *   }
+ *   c : 1
+ * }
+ * However, if the update mask was:
+ *  paths: "f.b.d"
+ * then the result would be:
+ * f {
+ *   b {
+ * d : 10
+ * x : 2
+ *   }
+ *   c : 1
+ * }
+ * In order to reset a field's value to the default, the field must
+ * be in the mask and set to the default value in the provided resource.
+ * Hence, in order to reset all fields of a resource, provide a default
+ * instance of the resource and set all fields in the mask, or do
+ * not provide a mask as described below.
+ * If a field mask is not present on update, the operation applies to
+ * all fields (as if a field mask of all fields has been specified).
+ * Note that in the presence of schema evolution, this may mean that
+ * fields the client does not know and has therefore not filled into
+ * the request will be reset to their default. If this is unwanted
+ * behavior, a specific service may require a client to always specify
+ * a field mask, producing an error if not.
+ * As with get 
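
The update-merge rules spelled out above can be exercised directly against the generated class; a small hedged sketch, assuming the shaded FieldMask builder exposes the usual addPaths/getPathsList accessors for the repeated paths field.

import org.apache.hadoop.hbase.shaded.com.google.protobuf.FieldMask;

public class FieldMaskSketch {
  public static void main(String[] args) {
    // Mask selecting the whole sub-message f.b: per the javadoc above, an update with
    // this mask overwrites f.b entirely, while "f.b.d" would merge only that one field.
    FieldMask mask = FieldMask.newBuilder()
        .addPaths("f.b")
        .build();
    System.out.println(mask.getPathsList());   // prints [f.b]
  }
}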

[17/77] [abbrv] [partial] hbase git commit: HBASE-15638 Shade protobuf Which includes

2016-10-07 Thread syuanjiang
http://git-wip-us.apache.org/repos/asf/hbase/blob/95c1dc93/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/EnumValue.java
--
diff --git 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/EnumValue.java
 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/EnumValue.java
new file mode 100644
index 000..83771f5
--- /dev/null
+++ 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/EnumValue.java
@@ -0,0 +1,1044 @@
+// Generated by the protocol buffer compiler.  DO NOT EDIT!
+// source: google/protobuf/type.proto
+
+package org.apache.hadoop.hbase.shaded.com.google.protobuf;
+
+/**
+ * 
+ * Enum value definition.
+ * 
+ *
+ * Protobuf type {@code google.protobuf.EnumValue}
+ */
+public  final class EnumValue extends
+    org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 implements
+// @@protoc_insertion_point(message_implements:google.protobuf.EnumValue)
+EnumValueOrBuilder {
+  // Use EnumValue.newBuilder() to construct.
+  private EnumValue(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
+    super(builder);
+  }
+  private EnumValue() {
+name_ = "";
+number_ = 0;
+options_ = java.util.Collections.emptyList();
+  }
+
+  @java.lang.Override
+  public final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet
+  getUnknownFields() {
+    return org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.getDefaultInstance();
+  }
+  private EnumValue(
+  org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream 
input,
+  org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite 
extensionRegistry)
+  throws 
org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException
 {
+this();
+int mutable_bitField0_ = 0;
+try {
+  boolean done = false;
+  while (!done) {
+int tag = input.readTag();
+switch (tag) {
+  case 0:
+done = true;
+break;
+  default: {
+if (!input.skipField(tag)) {
+  done = true;
+}
+break;
+  }
+  case 10: {
+java.lang.String s = input.readStringRequireUtf8();
+
+name_ = s;
+break;
+  }
+  case 16: {
+
+number_ = input.readInt32();
+break;
+  }
+  case 26: {
+if (!((mutable_bitField0_ & 0x0004) == 0x0004)) {
+  options_ = new 
java.util.ArrayList();
+  mutable_bitField0_ |= 0x0004;
+}
+options_.add(
+
input.readMessage(org.apache.hadoop.hbase.shaded.com.google.protobuf.Option.parser(),
 extensionRegistry));
+break;
+  }
+}
+  }
+} catch 
(org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException
 e) {
+  throw e.setUnfinishedMessage(this);
+} catch (java.io.IOException e) {
+  throw new 
org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException(
+  e).setUnfinishedMessage(this);
+} finally {
+  if (((mutable_bitField0_ & 0x0004) == 0x0004)) {
+options_ = java.util.Collections.unmodifiableList(options_);
+  }
+  makeExtensionsImmutable();
+}
+  }
+  public static final 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+  getDescriptor() {
+return 
org.apache.hadoop.hbase.shaded.com.google.protobuf.TypeProto.internal_static_google_protobuf_EnumValue_descriptor;
+  }
+
+  protected 
org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+  internalGetFieldAccessorTable() {
+return 
org.apache.hadoop.hbase.shaded.com.google.protobuf.TypeProto.internal_static_google_protobuf_EnumValue_fieldAccessorTable
+.ensureFieldAccessorsInitialized(
+
org.apache.hadoop.hbase.shaded.com.google.protobuf.EnumValue.class, 
org.apache.hadoop.hbase.shaded.com.google.protobuf.EnumValue.Builder.class);
+  }
+
+  private int bitField0_;
+  public static final int NAME_FIELD_NUMBER = 1;
+  private volatile java.lang.Object name_;
+  /**
+   * 
+   * Enum value name.
+   * 
+   *
+   * optional string name = 1;
+   */
+  public java.lang.String getName() {
+java.lang.Object ref = name_;
+if (ref instanceof java.lang.String) {
+  return (java.lang.String) ref;
+} else {
+  org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString bs = 
+  (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) ref;
+  java.lang.String s = bs.toStringUtf8();
+  name_ = s;
+  return s;
+}
+  }
+  /**
+   * 
+   * Enum value name.
+   * 
+   *
+   * optional 

hbase git commit: HBASE-16657 Expose per-region last major compaction time in RegionServer UI

2016-10-07 Thread garyh
Repository: hbase
Updated Branches:
  refs/heads/master 62bc09012 -> bc9a97245


HBASE-16657 Expose per-region last major compaction time in RegionServer UI

Signed-off-by: Gary Helmling 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/bc9a9724
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/bc9a9724
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/bc9a9724

Branch: refs/heads/master
Commit: bc9a972451b2899dec1921bc34e86945df304547
Parents: 62bc090
Author: Dustin Pho 
Authored: Sat Sep 24 17:53:55 2016 -0700
Committer: Gary Helmling 
Committed: Fri Oct 7 09:49:24 2016 -0700

--
 .../hadoop/hbase/tmpl/regionserver/RegionListTmpl.jamon | 9 +
 1 file changed, 9 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/bc9a9724/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RegionListTmpl.jamon
--
diff --git 
a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RegionListTmpl.jamon
 
b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RegionListTmpl.jamon
index ab45799..b393137 100644
--- 
a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RegionListTmpl.jamon
+++ 
b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RegionListTmpl.jamon
@@ -22,6 +22,7 @@
 
 <%import>
 java.util.*;
+org.apache.commons.lang.time.FastDateFormat;
 org.apache.hadoop.hbase.regionserver.HRegionServer;
 org.apache.hadoop.hbase.util.Bytes;
 org.apache.hadoop.hbase.HRegionInfo;
@@ -197,6 +198,7 @@
 Num. Compacting KVs
 Num. Compacted KVs
 Compaction Progress
+Last Major Compaction
 
 
 <%for HRegionInfo r: onlineRegions %>
@@ -211,12 +213,19 @@
 }
 String displayName = HRegionInfo.getRegionNameAsStringForDisplay(r,
   regionServer.getConfiguration());
+long lastMajorCompactionTs = load.getLastMajorCompactionTs();
+String compactTime = "";
+if (lastMajorCompactionTs > 0) {
+  FastDateFormat fdf = FastDateFormat.getInstance("yyyy-MM-dd HH:mm ZZ");
+  compactTime = fdf.format(lastMajorCompactionTs);
+}
 
 <% 
displayName %>
 <%if load != null %>
 <% load.getTotalCompactingKVs() %>
 <% load.getCurrentCompactedKVs() %>
 <% percentDone %>
+<% compactTime %>
 
 
 



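As an aside, a hedged standalone rendering of the formatting logic the template change introduces; the timestamp below is an invented example, and the pattern mirrors the one used in the patch.

import org.apache.commons.lang.time.FastDateFormat;

public class CompactionTimeFormatSketch {
  public static void main(String[] args) {
    long lastMajorCompactionTs = 1475884800000L;   // example epoch millis, not from a real region
    String compactTime = "";
    if (lastMajorCompactionTs > 0) {               // 0 means the region has never major-compacted
      FastDateFormat fdf = FastDateFormat.getInstance("yyyy-MM-dd HH:mm ZZ");
      compactTime = fdf.format(lastMajorCompactionTs);
    }
    System.out.println(compactTime);
  }
}
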
hbase git commit: HBASE-16773 AccessController should access local region if possible

2016-10-07 Thread tedyu
Repository: hbase
Updated Branches:
  refs/heads/master 96d34f2a7 -> 62bc09012


HBASE-16773 AccessController should access local region if possible


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/62bc0901
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/62bc0901
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/62bc0901

Branch: refs/heads/master
Commit: 62bc0901236267937b1fd7ebe4f1cab8a6033def
Parents: 96d34f2
Author: tedyu 
Authored: Fri Oct 7 06:22:32 2016 -0700
Committer: tedyu 
Committed: Fri Oct 7 06:22:32 2016 -0700

--
 .../security/access/AccessControlLists.java | 158 +--
 .../hbase/security/access/AccessController.java |  34 ++--
 .../security/access/TestTablePermissions.java   | 118 --
 3 files changed, 166 insertions(+), 144 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/62bc0901/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java
index b2a4736..1794464 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java
@@ -22,10 +22,8 @@ import java.io.ByteArrayInputStream;
 import java.io.DataInput;
 import java.io.DataInputStream;
 import java.io.IOException;
-import java.io.InputStream;
 import java.util.ArrayList;
 import java.util.Arrays;
-import java.util.Collection;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
@@ -41,6 +39,7 @@ import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.TableName;
@@ -64,8 +63,6 @@ import org.apache.hadoop.hbase.filter.RegexStringComparator;
 import org.apache.hadoop.hbase.master.MasterServices;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos;
-import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.AccessControlService;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
 import org.apache.hadoop.hbase.regionserver.BloomType;
 import org.apache.hadoop.hbase.regionserver.InternalScanner;
 import org.apache.hadoop.hbase.regionserver.Region;
@@ -77,11 +74,6 @@ import org.apache.hadoop.io.Text;
 import com.google.common.collect.ArrayListMultimap;
 import com.google.common.collect.ListMultimap;
 import com.google.common.collect.Lists;
-import com.google.protobuf.ByteString;
-import com.google.protobuf.CodedInputStream;
-import com.google.protobuf.Message;
-import com.google.protobuf.RpcController;
-import com.google.protobuf.ServiceException;
 
 /**
  * Maintains lists of permission grants to users and groups to allow for
@@ -153,9 +145,10 @@ public class AccessControlLists {
* Stores a new user permission grant in the access control lists table.
* @param conf the configuration
* @param userPerm the details of the permission to be granted
+   * @param t acl table instance. It is closed upon method return.
* @throws IOException in the case of an error accessing the metadata table
*/
-  static void addUserPermission(Configuration conf, UserPermission userPerm)
+  static void addUserPermission(Configuration conf, UserPermission userPerm, Table t)
   throws IOException {
 Permission.Action[] actions = userPerm.getActions();
 byte[] rowKey = userPermissionRowKey(userPerm);
@@ -179,11 +172,10 @@ public class AccessControlLists {
   Bytes.toString(key)+": "+Bytes.toStringBinary(value)
   );
 }
-// TODO: Pass in a Connection rather than create one each time.
-try (Connection connection = ConnectionFactory.createConnection(conf)) {
-  try (Table table = connection.getTable(ACL_TABLE_NAME)) {
-table.put(p);
-  }
+try {
+  t.put(p);
+} finally {
+  t.close();
 }
   }
 
@@ -198,9 +190,10 @@ public class AccessControlLists {
*
* @param conf the configuration
* @param userPerm the details of the permission to be revoked
+   * @param t acl table
* @throws IOException if there is an error accessing the metadata table
*/
-  static void removeUserPermission(Configuration conf, 
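
A rough caller-side sketch of the hand-off this patch introduces: the caller supplies the acl table (here obtained from an open Connection) and addUserPermission closes it before returning. Note the helper is package-private, so the call only compiles from within org.apache.hadoop.hbase.security.access; everything outside the call itself is assumed scaffolding, not part of the patch.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.security.access.AccessControlLists;
import org.apache.hadoop.hbase.security.access.Permission;
import org.apache.hadoop.hbase.security.access.UserPermission;
import org.apache.hadoop.hbase.util.Bytes;

public class GrantSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    UserPermission userPerm = new UserPermission(Bytes.toBytes("bob"),
        TableName.valueOf("t1"), null, Permission.Action.READ);
    try (Connection connection = ConnectionFactory.createConnection(conf)) {
      // The helper closes the table in its finally block, so it must not be reused afterwards.
      AccessControlLists.addUserPermission(conf, userPerm,
          connection.getTable(AccessControlLists.ACL_TABLE_NAME));
    }
  }
}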

hbase git commit: HBASE-16768 Inconsistent results from the Append/Increment (ChiaPing Tsai)

2016-10-07 Thread tedyu
Repository: hbase
Updated Branches:
  refs/heads/master 2c7211ec4 -> 96d34f2a7


HBASE-16768 Inconsistent results from the Append/Increment (ChiaPing Tsai)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/96d34f2a
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/96d34f2a
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/96d34f2a

Branch: refs/heads/master
Commit: 96d34f2a79bf977d83f0b9814b253669e6c6e671
Parents: 2c7211e
Author: tedyu 
Authored: Fri Oct 7 00:59:27 2016 -0700
Committer: tedyu 
Committed: Fri Oct 7 00:59:27 2016 -0700

--
 .../hbase/regionserver/AbstractMemStore.java|  9 +++
 .../hadoop/hbase/regionserver/HRegion.java  | 51 
 .../hadoop/hbase/regionserver/HStore.java   | 10 
 .../hadoop/hbase/regionserver/MemStore.java |  7 +++
 .../apache/hadoop/hbase/regionserver/Store.java |  7 +++
 .../hadoop/hbase/client/TestFromClientSide.java | 61 
 6 files changed, 121 insertions(+), 24 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/96d34f2a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMemStore.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMemStore.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMemStore.java
index aa6576f..5544251 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMemStore.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMemStore.java
@@ -97,6 +97,15 @@ public abstract class AbstractMemStore implements MemStore {
*/
   public abstract void updateLowestUnflushedSequenceIdInWAL(boolean 
onlyIfMoreRecent);
 
+  @Override
+  public long add(Iterable<Cell> cells) {
+    long size = 0;
+    for (Cell cell : cells) {
+      size += add(cell);
+    }
+    return size;
+  }
+  
   /**
* Write an update
* @param cell the cell to be added

http://git-wip-us.apache.org/repos/asf/hbase/blob/96d34f2a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 757ddab..d1684a3 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -3256,8 +3256,15 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
 if (batchOp.retCodeDetails[i].getOperationStatusCode() != 
OperationStatusCode.NOT_RUN) {
   continue;
 }
-addedSize += applyFamilyMapToMemstore(familyMaps[i], replay,
+// We need to update the sequence id for following reasons.
+// 1) If the op is in replay mode, FSWALEntry#stampRegionSequenceId won't stamp sequence id.
+// 2) If no WAL, FSWALEntry won't be used
+boolean updateSeqId = replay || batchOp.getMutation(i).getDurability() == Durability.SKIP_WAL;
+if (updateSeqId) {
+  this.updateSequenceId(familyMaps[i].values(),
 replay? batchOp.getReplaySequenceId(): 
writeEntry.getWriteNumber());
+}
+addedSize += applyFamilyMapToMemstore(familyMaps[i]);
   }
 
   // STEP 6. Complete mvcc.
@@ -3673,6 +3680,16 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
 }
   }
 
+  private void updateSequenceId(final Iterable<List<Cell>> cellItr, final long sequenceId)
+  throws IOException {
+    for (List<Cell> cells: cellItr) {
+      if (cells == null) return;
+      for (Cell cell : cells) {
+        CellUtil.setSequenceId(cell, sequenceId);
+      }
+    }
+  }
+
   @Override
   public void updateCellTimestamps(final Iterable cellItr, final 
byte[] now)
   throws IOException {
@@ -3783,15 +3800,14 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
* @param familyMap Map of Cells by family
* @return the additional memory usage of the memstore caused by the new 
entries.
*/
-  private long applyFamilyMapToMemstore(Map familyMap, 
boolean replay,
-  long sequenceId)
+  private long applyFamilyMapToMemstore(Map familyMap)
   throws IOException {
 long size = 0;
 for (Map.Entry e : familyMap.entrySet()) {
   byte[] family = e.getKey();
   List cells = e.getValue();
   assert cells instanceof RandomAccess;
-  size += applyToMemstore(getStore(family), 

hbase git commit: HBASE-16723 RMI registry is not destroyed after stopping JMX Connector Server

2016-10-07 Thread ashishsinghi
Repository: hbase
Updated Branches:
  refs/heads/branch-1.2 c7007ac57 -> 017bc3337


HBASE-16723 RMI registry is not destroyed after stopping JMX Connector Server

Signed-off-by: Ashish Singhi 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/017bc333
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/017bc333
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/017bc333

Branch: refs/heads/branch-1.2
Commit: 017bc3337eb8c1ab56c388b7302de231cafba6f7
Parents: c7007ac
Author: Pankaj Kumar 
Authored: Fri Oct 7 12:13:48 2016 +0530
Committer: Ashish Singhi 
Committed: Fri Oct 7 12:13:48 2016 +0530

--
 .../java/org/apache/hadoop/hbase/JMXListener.java  | 17 +
 1 file changed, 13 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/017bc333/hbase-server/src/main/java/org/apache/hadoop/hbase/JMXListener.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/JMXListener.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/JMXListener.java
index 2872cfa..9265fb8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/JMXListener.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/JMXListener.java
@@ -27,8 +27,10 @@ import org.apache.hadoop.hbase.coprocessor.*;
 import java.io.IOException;
 import java.lang.management.ManagementFactory;
 import java.rmi.registry.LocateRegistry;
+import java.rmi.registry.Registry;
 import java.rmi.server.RMIClientSocketFactory;
 import java.rmi.server.RMIServerSocketFactory;
+import java.rmi.server.UnicastRemoteObject;
 import java.util.HashMap;
 
 import javax.management.MBeanServer;
@@ -36,8 +38,6 @@ import javax.management.remote.JMXConnectorServer;
 import javax.management.remote.JMXConnectorServerFactory;
 import javax.management.remote.JMXServiceURL;
 import javax.management.remote.rmi.RMIConnectorServer;
-import javax.rmi.ssl.SslRMIClientSocketFactory;
-import javax.rmi.ssl.SslRMIServerSocketFactory;
 
 /**
  * Pluggable JMX Agent for HBase(to fix the 2 random TCP ports issue
@@ -61,6 +61,7 @@ public class JMXListener implements Coprocessor {
* we only load regionserver coprocessor on master
*/
   private static JMXConnectorServer JMX_CS = null;
+  private Registry rmiRegistry = null;
 
   public static JMXServiceURL buildJMXServiceURL(int rmiRegistryPort,
   int rmiConnectorPort) throws IOException {
@@ -128,7 +129,7 @@ public class JMXListener implements Coprocessor {
 }
 
 // Create the RMI registry
-LocateRegistry.createRegistry(rmiRegistryPort);
+rmiRegistry = LocateRegistry.createRegistry(rmiRegistryPort);
 // Retrieve the PlatformMBeanServer.
 MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
 
@@ -147,17 +148,25 @@ public class JMXListener implements Coprocessor {
   LOG.info("ConnectorServer started!");
 } catch (IOException e) {
   LOG.error("fail to start connector server!", e);
+  // deregister the RMI registry
+  if (rmiRegistry != null) {
+UnicastRemoteObject.unexportObject(rmiRegistry, true);
+  }
 }
 
   }
 
   public void stopConnectorServer() throws IOException {
-synchronized(JMXListener.class) {
+synchronized (JMXListener.class) {
   if (JMX_CS != null) {
 JMX_CS.stop();
 LOG.info("ConnectorServer stopped!");
 JMX_CS = null;
   }
+  // deregister the RMI registry
+  if (rmiRegistry != null) {
+UnicastRemoteObject.unexportObject(rmiRegistry, true);
+  }
 }
   }
 



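For reference, a minimal JDK-only sketch of the registry lifecycle the fix enforces: keep the Registry handle returned by createRegistry so the registry can be unexported (and its port released) once the connector server is done. The port number is an arbitrary example.

import java.rmi.registry.LocateRegistry;
import java.rmi.registry.Registry;
import java.rmi.server.UnicastRemoteObject;

public class RmiRegistryLifecycle {
  public static void main(String[] args) throws Exception {
    int rmiRegistryPort = 10101;                              // assumed example port
    Registry registry = LocateRegistry.createRegistry(rmiRegistryPort);
    try {
      // ... start and use a JMXConnectorServer bound to this registry ...
    } finally {
      UnicastRemoteObject.unexportObject(registry, true);     // deregister, freeing the port
    }
  }
}
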
hbase git commit: HBASE-16723 RMI registry is not destroyed after stopping JMX Connector Server

2016-10-07 Thread ashishsinghi
Repository: hbase
Updated Branches:
  refs/heads/branch-1.3 e8f0ccc81 -> a52188f97


HBASE-16723 RMI registry is not destroyed after stopping JMX Connector Server

Signed-off-by: Ashish Singhi 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a52188f9
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a52188f9
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a52188f9

Branch: refs/heads/branch-1.3
Commit: a52188f97e7e32fc79b35d7155a44a5ad31bbd6f
Parents: e8f0ccc
Author: Pankaj Kumar 
Authored: Fri Oct 7 12:08:50 2016 +0530
Committer: Ashish Singhi 
Committed: Fri Oct 7 12:08:50 2016 +0530

--
 .../java/org/apache/hadoop/hbase/JMXListener.java  | 17 +
 1 file changed, 13 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/a52188f9/hbase-server/src/main/java/org/apache/hadoop/hbase/JMXListener.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/JMXListener.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/JMXListener.java
index 2872cfa..9265fb8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/JMXListener.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/JMXListener.java
@@ -27,8 +27,10 @@ import org.apache.hadoop.hbase.coprocessor.*;
 import java.io.IOException;
 import java.lang.management.ManagementFactory;
 import java.rmi.registry.LocateRegistry;
+import java.rmi.registry.Registry;
 import java.rmi.server.RMIClientSocketFactory;
 import java.rmi.server.RMIServerSocketFactory;
+import java.rmi.server.UnicastRemoteObject;
 import java.util.HashMap;
 
 import javax.management.MBeanServer;
@@ -36,8 +38,6 @@ import javax.management.remote.JMXConnectorServer;
 import javax.management.remote.JMXConnectorServerFactory;
 import javax.management.remote.JMXServiceURL;
 import javax.management.remote.rmi.RMIConnectorServer;
-import javax.rmi.ssl.SslRMIClientSocketFactory;
-import javax.rmi.ssl.SslRMIServerSocketFactory;
 
 /**
  * Pluggable JMX Agent for HBase(to fix the 2 random TCP ports issue
@@ -61,6 +61,7 @@ public class JMXListener implements Coprocessor {
* we only load regionserver coprocessor on master
*/
   private static JMXConnectorServer JMX_CS = null;
+  private Registry rmiRegistry = null;
 
   public static JMXServiceURL buildJMXServiceURL(int rmiRegistryPort,
   int rmiConnectorPort) throws IOException {
@@ -128,7 +129,7 @@ public class JMXListener implements Coprocessor {
 }
 
 // Create the RMI registry
-LocateRegistry.createRegistry(rmiRegistryPort);
+rmiRegistry = LocateRegistry.createRegistry(rmiRegistryPort);
 // Retrieve the PlatformMBeanServer.
 MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
 
@@ -147,17 +148,25 @@ public class JMXListener implements Coprocessor {
   LOG.info("ConnectorServer started!");
 } catch (IOException e) {
   LOG.error("fail to start connector server!", e);
+  // deregister the RMI registry
+  if (rmiRegistry != null) {
+UnicastRemoteObject.unexportObject(rmiRegistry, true);
+  }
 }
 
   }
 
   public void stopConnectorServer() throws IOException {
-synchronized(JMXListener.class) {
+synchronized (JMXListener.class) {
   if (JMX_CS != null) {
 JMX_CS.stop();
 LOG.info("ConnectorServer stopped!");
 JMX_CS = null;
   }
+  // deregister the RMI registry
+  if (rmiRegistry != null) {
+UnicastRemoteObject.unexportObject(rmiRegistry, true);
+  }
 }
   }