hbase git commit: HBASE-21034 Add new throttle type: read/write capacity unit

2018-11-20 Thread zghao
Repository: hbase
Updated Branches:
  refs/heads/branch-2 6a64811f4 -> d590d6e47


HBASE-21034 Add new throttle type: read/write capacity unit

Signed-off-by: Guanghao Zhang 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/d590d6e4
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/d590d6e4
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/d590d6e4

Branch: refs/heads/branch-2
Commit: d590d6e4720154b79184c5b7be862b76c8c892ea
Parents: 6a64811
Author: meiyi 
Authored: Mon Nov 19 17:17:30 2018 +0800
Committer: Guanghao Zhang 
Committed: Wed Nov 21 09:50:07 2018 +0800

--
 .../hbase/quotas/QuotaSettingsFactory.java  | 12 +++
 .../hadoop/hbase/quotas/ThrottleSettings.java   |  6 ++
 .../hadoop/hbase/quotas/ThrottleType.java   |  9 ++
 .../hbase/shaded/protobuf/ProtobufUtil.java | 56 
 .../src/main/protobuf/Quota.proto   |  7 ++
 .../hbase/quotas/DefaultOperationQuota.java | 71 +++
 .../hbase/quotas/GlobalQuotaSettingsImpl.java   | 27 ++
 .../hadoop/hbase/quotas/NoopQuotaLimiter.java   | 11 +--
 .../hadoop/hbase/quotas/QuotaLimiter.java   | 18 ++--
 .../apache/hadoop/hbase/quotas/QuotaUtil.java   |  7 ++
 .../quotas/RegionServerRpcQuotaManager.java |  5 +-
 .../hadoop/hbase/quotas/TimeBasedLimiter.java   | 94 +---
 .../hadoop/hbase/quotas/TestQuotaAdmin.java | 24 -
 .../hadoop/hbase/quotas/TestQuotaState.java |  8 +-
 .../hadoop/hbase/quotas/TestQuotaThrottle.java  | 66 +-
 hbase-shell/src/main/ruby/hbase/quotas.rb   |  5 +-
 .../src/main/ruby/shell/commands/set_quota.rb   | 10 ++-
 hbase-shell/src/test/ruby/hbase/quotas_test.rb  | 27 ++
 18 files changed, 396 insertions(+), 67 deletions(-)
--
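For context on what the new throttle types look like from the client side, here is a minimal sketch using the long-standing QuotaSettingsFactory/Admin quota API together with the capacity-unit ThrottleType constants this change introduces. The user name, limits and time unit are illustrative only; this is not part of the patch. Table- and namespace-scoped variants (throttleTable, throttleNamespace) take the same ThrottleType argument.

import java.util.concurrent.TimeUnit;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.quotas.QuotaSettingsFactory;
import org.apache.hadoop.hbase.quotas.ThrottleType;

public class CapacityUnitThrottleExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
        Admin admin = conn.getAdmin()) {
      // Throttle user 'u1' to 10 write capacity units per second.
      admin.setQuota(QuotaSettingsFactory.throttleUser("u1",
          ThrottleType.WRITE_CAPACITY_UNIT, 10, TimeUnit.SECONDS));
      // And to 20 read capacity units per second.
      admin.setQuota(QuotaSettingsFactory.throttleUser("u1",
          ThrottleType.READ_CAPACITY_UNIT, 20, TimeUnit.SECONDS));
    }
  }
}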


http://git-wip-us.apache.org/repos/asf/hbase/blob/d590d6e4/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaSettingsFactory.java
--
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaSettingsFactory.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaSettingsFactory.java
index 2a20c51..14d1ad3 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaSettingsFactory.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaSettingsFactory.java
@@ -143,6 +143,18 @@ public class QuotaSettingsFactory {
       settings.add(ThrottleSettings.fromTimedQuota(userName, tableName, namespace,
         ThrottleType.READ_SIZE, throttle.getReadSize()));
     }
+    if (throttle.hasReqCapacityUnit()) {
+      settings.add(ThrottleSettings.fromTimedQuota(userName, tableName, namespace,
+        ThrottleType.REQUEST_CAPACITY_UNIT, throttle.getReqCapacityUnit()));
+    }
+    if (throttle.hasReadCapacityUnit()) {
+      settings.add(ThrottleSettings.fromTimedQuota(userName, tableName, namespace,
+        ThrottleType.READ_CAPACITY_UNIT, throttle.getReadCapacityUnit()));
+    }
+    if (throttle.hasWriteCapacityUnit()) {
+      settings.add(ThrottleSettings.fromTimedQuota(userName, tableName, namespace,
+        ThrottleType.WRITE_CAPACITY_UNIT, throttle.getWriteCapacityUnit()));
+    }
     return settings;
   }
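The branch added above is what maps the stored protobuf Throttle fields back to ThrottleSettings when a client lists quotas. Below is a rough sketch of reading the settings back with the pre-existing QuotaRetriever/QuotaFilter API (the user name 'u1' is illustrative); the printed form comes from ThrottleSettings.toString(), where capacity-unit limits are rendered with the "CU" suffix shown in the next hunk.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.quotas.QuotaFilter;
import org.apache.hadoop.hbase.quotas.QuotaRetriever;
import org.apache.hadoop.hbase.quotas.QuotaSettings;

public class ListUserThrottles {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Scan the quota table for throttles defined for user 'u1'.
    try (QuotaRetriever retriever =
        QuotaRetriever.open(conf, new QuotaFilter().setUserFilter("u1"))) {
      for (QuotaSettings settings : retriever) {
        System.out.println(settings);
      }
    }
  }
}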
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/d590d6e4/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/ThrottleSettings.java
--
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/ThrottleSettings.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/ThrottleSettings.java
index e424d8a..05fb70b 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/ThrottleSettings.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/ThrottleSettings.java
@@ -95,6 +95,12 @@ class ThrottleSettings extends QuotaSettings {
         case READ_SIZE:
           builder.append(sizeToString(timedQuota.getSoftLimit()));
           break;
+        case REQUEST_CAPACITY_UNIT:
+        case READ_CAPACITY_UNIT:
+        case WRITE_CAPACITY_UNIT:
+          builder.append(String.format("%dCU", timedQuota.getSoftLimit()));
+          break;
+        default:
       }
     } else if (timedQuota.hasShare()) {
       builder.append(String.format("%.2f%%", timedQuota.getShare()));

http://git-wip-us.apache.org/repos/asf/hbase/blob/d590d6e4/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/ThrottleType.java
--
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/ThrottleType.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/ThrottleType.java
index 0b0ee60..ec5b32d 100644
--- a/hbase-client/sr
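The ThrottleType hunk is cut off in this archive. Going by the constants referenced in QuotaSettingsFactory and ThrottleSettings above, the enum presumably ends up roughly as sketched below (javadoc and annotations omitted; only the three capacity-unit constants are new, the others already existed):

package org.apache.hadoop.hbase.quotas;

// Sketch only -- not the verbatim content of the truncated hunk.
public enum ThrottleType {
  REQUEST_NUMBER,         // throttle by number of requests
  REQUEST_SIZE,           // throttle by request data size
  WRITE_NUMBER,           // throttle by number of write requests
  WRITE_SIZE,             // throttle by write data size
  READ_NUMBER,            // throttle by number of read requests
  READ_SIZE,              // throttle by read data size
  REQUEST_CAPACITY_UNIT,  // new: throttle by combined read/write capacity units
  READ_CAPACITY_UNIT,     // new: throttle by read capacity units
  WRITE_CAPACITY_UNIT,    // new: throttle by write capacity units
}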

hbase git commit: HBASE-21034 Add new throttle type: read/write capacity unit

2018-11-20 Thread zghao
Repository: hbase
Updated Branches:
  refs/heads/master 405bf5e63 -> 5ded29441


HBASE-21034 Add new throttle type: read/write capacity unit

Signed-off-by: Guanghao Zhang 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/5ded2944
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/5ded2944
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/5ded2944

Branch: refs/heads/master
Commit: 5ded2944199f27440a46df6f200ff2a31c1b8728
Parents: 405bf5e
Author: meiyi 
Authored: Mon Nov 19 17:17:30 2018 +0800
Committer: Guanghao Zhang 
Committed: Wed Nov 21 09:46:49 2018 +0800

--
 .../hbase/quotas/QuotaSettingsFactory.java  | 12 +++
 .../hadoop/hbase/quotas/ThrottleSettings.java   |  6 ++
 .../hadoop/hbase/quotas/ThrottleType.java   |  9 ++
 .../hbase/shaded/protobuf/ProtobufUtil.java | 56 
 .../src/main/protobuf/Quota.proto   |  7 ++
 .../hbase/quotas/DefaultOperationQuota.java | 71 +++
 .../hbase/quotas/GlobalQuotaSettingsImpl.java   | 27 ++
 .../hadoop/hbase/quotas/NoopQuotaLimiter.java   | 11 +--
 .../hadoop/hbase/quotas/QuotaLimiter.java   | 18 ++--
 .../apache/hadoop/hbase/quotas/QuotaUtil.java   |  7 ++
 .../quotas/RegionServerRpcQuotaManager.java |  5 +-
 .../hadoop/hbase/quotas/TimeBasedLimiter.java   | 94 +---
 .../hadoop/hbase/quotas/TestQuotaAdmin.java | 24 -
 .../hadoop/hbase/quotas/TestQuotaState.java |  8 +-
 .../hadoop/hbase/quotas/TestQuotaThrottle.java  | 66 +-
 hbase-shell/src/main/ruby/hbase/quotas.rb   |  5 +-
 .../src/main/ruby/shell/commands/set_quota.rb   | 10 ++-
 hbase-shell/src/test/ruby/hbase/quotas_test.rb  | 27 ++
 18 files changed, 396 insertions(+), 67 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/5ded2944/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaSettingsFactory.java
--
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaSettingsFactory.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaSettingsFactory.java
index 2a20c51..14d1ad3 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaSettingsFactory.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaSettingsFactory.java
@@ -143,6 +143,18 @@ public class QuotaSettingsFactory {
       settings.add(ThrottleSettings.fromTimedQuota(userName, tableName, namespace,
         ThrottleType.READ_SIZE, throttle.getReadSize()));
     }
+    if (throttle.hasReqCapacityUnit()) {
+      settings.add(ThrottleSettings.fromTimedQuota(userName, tableName, namespace,
+        ThrottleType.REQUEST_CAPACITY_UNIT, throttle.getReqCapacityUnit()));
+    }
+    if (throttle.hasReadCapacityUnit()) {
+      settings.add(ThrottleSettings.fromTimedQuota(userName, tableName, namespace,
+        ThrottleType.READ_CAPACITY_UNIT, throttle.getReadCapacityUnit()));
+    }
+    if (throttle.hasWriteCapacityUnit()) {
+      settings.add(ThrottleSettings.fromTimedQuota(userName, tableName, namespace,
+        ThrottleType.WRITE_CAPACITY_UNIT, throttle.getWriteCapacityUnit()));
+    }
     return settings;
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/5ded2944/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/ThrottleSettings.java
--
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/ThrottleSettings.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/ThrottleSettings.java
index e424d8a..05fb70b 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/ThrottleSettings.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/ThrottleSettings.java
@@ -95,6 +95,12 @@ class ThrottleSettings extends QuotaSettings {
         case READ_SIZE:
           builder.append(sizeToString(timedQuota.getSoftLimit()));
           break;
+        case REQUEST_CAPACITY_UNIT:
+        case READ_CAPACITY_UNIT:
+        case WRITE_CAPACITY_UNIT:
+          builder.append(String.format("%dCU", timedQuota.getSoftLimit()));
+          break;
+        default:
       }
     } else if (timedQuota.hasShare()) {
       builder.append(String.format("%.2f%%", timedQuota.getShare()));

http://git-wip-us.apache.org/repos/asf/hbase/blob/5ded2944/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/ThrottleType.java
--
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/ThrottleType.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/ThrottleType.java
index 0b0ee60..ec5b32d 100644
--- a/hbase-client/src/ma

[08/11] hbase-site git commit: Published site at 405bf5e6383a09f435baadbac6c389e9f6c43ac6.

2018-11-20 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c8b83ace/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFormat.Loader.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFormat.Loader.html b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFormat.Loader.html
index 257263c..7a81ab8 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFormat.Loader.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFormat.Loader.html
@@ -96,183 +96,180 @@
 088  Loader loader) throws IOException {
 089ProcedureWALFormatReader reader = new ProcedureWALFormatReader(tracker, loader);
 090tracker.setKeepDeletes(true);
-091try {
-092  // Ignore the last log which is current active log.
-093  while (logs.hasNext()) {
-094ProcedureWALFile log = logs.next();
-095log.open();
-096try {
-097  reader.read(log);
-098} finally {
-099  log.close();
-100}
-101  }
-102  reader.finish();
-103
-104  // The tracker is now updated with all the procedures read from the logs
-105  if (tracker.isPartial()) {
-106tracker.setPartialFlag(false);
-107  }
-108  tracker.resetModified();
-109} finally {
-110  tracker.setKeepDeletes(false);
-111}
-112  }
-113
-114  public static void writeHeader(OutputStream stream, ProcedureWALHeader header)
-115  throws IOException {
-116header.writeDelimitedTo(stream);
-117  }
-118
-119  /*
-120   * +-+
-121   * | END OF WAL DATA | <---+
-122   * +-+ |
-123   * | | |
-124   * | Tracker | |
-125   * | | |
-126   * +-+ |
-127   * | version | |
-128   * +-+ |
-129   * |  TRAILER_MAGIC  | |
-130   * +-+ |
-131   * |  offset |-+
-132   * +-+
-133   */
-134  public static long writeTrailer(FSDataOutputStream stream, ProcedureStoreTracker tracker)
-135  throws IOException {
-136long offset = stream.getPos();
-137
-138// Write EOF Entry
-139ProcedureWALEntry.newBuilder()
-140  .setType(ProcedureWALEntry.Type.PROCEDURE_WAL_EOF)
-141  .build().writeDelimitedTo(stream);
+091// Ignore the last log which is current active log.
+092while (logs.hasNext()) {
+093  ProcedureWALFile log = logs.next();
+094  log.open();
+095  try {
+096reader.read(log);
+097  } finally {
+098log.close();
+099  }
+100}
+101reader.finish();
+102
+103// The tracker is now updated with all the procedures read from the logs
+104if (tracker.isPartial()) {
+105  tracker.setPartialFlag(false);
+106}
+107tracker.resetModified();
+108tracker.setKeepDeletes(false);
+109  }
+110
+111  public static void writeHeader(OutputStream stream, ProcedureWALHeader header)
+112  throws IOException {
+113header.writeDelimitedTo(stream);
+114  }
+115
+116  /*
+117   * +-+
+118   * | END OF WAL DATA | <---+
+119   * +-+ |
+120   * | | |
+121   * | Tracker | |
+122   * | | |
+123   * +-+ |
+124   * | version | |
+125   * +-+ |
+126   * |  TRAILER_MAGIC  | |
+127   * +-+ |
+128   * |  offset |-+
+129   * +-+
+130   */
+131  public static long writeTrailer(FSDataOutputStream stream, ProcedureStoreTracker tracker)
+132  throws IOException {
+133long offset = stream.getPos();
+134
+135// Write EOF Entry
+136ProcedureWALEntry.newBuilder()
+137  .setType(ProcedureWALEntry.Type.PROCEDURE_WAL_EOF)
+138  .build().writeDelimitedTo(stream);
+139
+140// Write Tracker
+141tracker.toProto().writeDelimitedTo(stream);
 142
-143// Write Tracker
-144tracker.toProto().writeDelimitedTo(stream);
-145
-146stream.write(TRAILER_VERSION);
-147StreamUtils.writeLong(stream, TRAILER_MAGIC);
-148StreamUtils.writeLong(stream, offset);
-149return stream.getPos() - offset;
-150  }
-151
-152  public static ProcedureWALHeader readHeader(InputStream stream)
-153  throws IOException {
-154ProcedureWALHeader header;
-155try {
-156  header = ProcedureWALHeader.parseDelimitedFrom(stream);
-157} catch (InvalidProtocolBufferException e) {
-158  throw new InvalidWALDataException(e);
-159}
-160
-161if (header == null) {
-162  throw new InvalidWALDataException("No data available to read the Header");
-163}
-164
-165if (header.getVersion() < 0 || header.getVersion() != HEADER_VERSION) {
-166  throw new InvalidWALDataException("Invali

[07/11] hbase-site git commit: Published site at 405bf5e6383a09f435baadbac6c389e9f6c43ac6.

2018-11-20 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c8b83ace/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.LeaseRecovery.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.LeaseRecovery.html b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.LeaseRecovery.html
index ab175b6..72a3459 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.LeaseRecovery.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.LeaseRecovery.html
@@ -456,969 +456,979 @@
 448lock.lock();
 449try {
 450  if (logs.isEmpty()) {
-451throw new RuntimeException("recoverLease() must be called before loading data");
+451throw new IllegalStateException("recoverLease() must be called before loading data");
 452  }
 453
 454  // Nothing to do, If we have only the current log.
 455  if (logs.size() == 1) {
 456LOG.debug("No state logs to replay.");
 457loader.setMaxProcId(0);
-458return;
-459  }
-460
-461  // Load the old logs
-462  Iterator it = logs.descendingIterator();
-463  it.next(); // Skip the current log
-464
-465  ProcedureWALFormat.load(it, storeTracker, new ProcedureWALFormat.Loader() {
-466
-467@Override
-468public void setMaxProcId(long maxProcId) {
-469  loader.setMaxProcId(maxProcId);
-470}
-471
-472@Override
-473public void load(ProcedureIterator procIter) throws IOException {
-474  loader.load(procIter);
-475}
-476
-477@Override
-478public void handleCorrupted(ProcedureIterator procIter) throws IOException {
-479  loader.handleCorrupted(procIter);
-480}
-481
-482@Override
-483public void markCorruptedWAL(ProcedureWALFile log, IOException e) {
-484  if (corruptedLogs == null) {
-485corruptedLogs = new HashSet<>();
-486  }
-487  corruptedLogs.add(log);
-488  // TODO: sideline corrupted log
-489}
-490  });
-491} finally {
-492  try {
-493// try to cleanup inactive wals and complete the operation
-494buildHoldingCleanupTracker();
-495tryCleanupLogsOnLoad();
-496loading.set(false);
-497  } finally {
-498lock.unlock();
-499  }
-500}
-501  }
-502
-503  private void tryCleanupLogsOnLoad() {
-504// nothing to cleanup.
-505if (logs.size() <= 1) {
-506  return;
-507}
+458loading.set(false);
+459return;
+460  }
+461
+462  // Load the old logs
+463  Iterator it = logs.descendingIterator();
+464  it.next(); // Skip the current log
+465
+466  ProcedureWALFormat.load(it, storeTracker, new ProcedureWALFormat.Loader() {
+467
+468@Override
+469public void setMaxProcId(long maxProcId) {
+470  loader.setMaxProcId(maxProcId);
+471}
+472
+473@Override
+474public void load(ProcedureIterator procIter) throws IOException {
+475  loader.load(procIter);
+476}
+477
+478@Override
+479public void handleCorrupted(ProcedureIterator procIter) throws IOException {
+480  loader.handleCorrupted(procIter);
+481}
+482
+483@Override
+484public void markCorruptedWAL(ProcedureWALFile log, IOException e) {
+485  if (corruptedLogs == null) {
+486corruptedLogs = new HashSet<>();
+487  }
+488  corruptedLogs.add(log);
+489  // TODO: sideline corrupted log
+490}
+491  });
+492  // if we fail when loading, we should prevent persisting the storeTracker later in the stop
+493  // method. As it may happen that, we have finished constructing the modified and deleted bits,
+494  // but before we call resetModified, we fail, then if we persist the storeTracker then when
+495  // restarting, we will consider that all procedures have been included in this file and delete
+496  // all the previous files. Obviously this not correct. So here we will only set loading to
+497  // false when we successfully loaded all the procedures, and when closing we will skip
+498  // persisting the store tracker. And also, this will prevent the sync thread to do
+499  // periodicRoll, where we may also clean old logs.
+500  loading.set(false);
+501  // try to cleanup inactive wals and complete the operation
+502  buildHoldingCleanupTracker();
+503  tryCleanupLogsOnLoad();
+504} finally {
+505  lock.unlock();
+506}
+507  }
 508
-509// the config says to not cleanup wals on load.
-510if (!conf.getBoolean(EXEC_WAL_CLEANUP_ON_LOAD_CONF_KEY,
-511  DEFAULT_EXEC_WA

[05/11] hbase-site git commit: Published site at 405bf5e6383a09f435baadbac6c389e9f6c43ac6.

2018-11-20 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c8b83ace/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.SyncMetrics.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.SyncMetrics.html b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.SyncMetrics.html
index ab175b6..72a3459 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.SyncMetrics.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.SyncMetrics.html
@@ -456,969 +456,979 @@
 448lock.lock();
 449try {
 450  if (logs.isEmpty()) {
-451throw new 
RuntimeException("recoverLease() must be called before loading data");
+451throw new 
IllegalStateException("recoverLease() must be called before loading data");
 452  }
 453
 454  // Nothing to do, If we have only 
the current log.
 455  if (logs.size() == 1) {
 456LOG.debug("No state logs to 
replay.");
 457loader.setMaxProcId(0);
-458return;
-459  }
-460
-461  // Load the old logs
-462  Iterator it 
= logs.descendingIterator();
-463  it.next(); // Skip the current 
log
-464
-465  ProcedureWALFormat.load(it, 
storeTracker, new ProcedureWALFormat.Loader() {
-466
-467@Override
-468public void setMaxProcId(long 
maxProcId) {
-469  
loader.setMaxProcId(maxProcId);
-470}
-471
-472@Override
-473public void 
load(ProcedureIterator procIter) throws IOException {
-474  loader.load(procIter);
-475}
-476
-477@Override
-478public void 
handleCorrupted(ProcedureIterator procIter) throws IOException {
-479  
loader.handleCorrupted(procIter);
-480}
-481
-482@Override
-483public void 
markCorruptedWAL(ProcedureWALFile log, IOException e) {
-484  if (corruptedLogs == null) {
-485corruptedLogs = new 
HashSet<>();
-486  }
-487  corruptedLogs.add(log);
-488  // TODO: sideline corrupted 
log
-489}
-490  });
-491} finally {
-492  try {
-493// try to cleanup inactive wals 
and complete the operation
-494buildHoldingCleanupTracker();
-495tryCleanupLogsOnLoad();
-496loading.set(false);
-497  } finally {
-498lock.unlock();
-499  }
-500}
-501  }
-502
-503  private void tryCleanupLogsOnLoad() {
-504// nothing to cleanup.
-505if (logs.size() <= 1) {
-506  return;
-507}
+458loading.set(false);
+459return;
+460  }
+461
+462  // Load the old logs
+463  Iterator it 
= logs.descendingIterator();
+464  it.next(); // Skip the current 
log
+465
+466  ProcedureWALFormat.load(it, 
storeTracker, new ProcedureWALFormat.Loader() {
+467
+468@Override
+469public void setMaxProcId(long 
maxProcId) {
+470  
loader.setMaxProcId(maxProcId);
+471}
+472
+473@Override
+474public void 
load(ProcedureIterator procIter) throws IOException {
+475  loader.load(procIter);
+476}
+477
+478@Override
+479public void 
handleCorrupted(ProcedureIterator procIter) throws IOException {
+480  
loader.handleCorrupted(procIter);
+481}
+482
+483@Override
+484public void 
markCorruptedWAL(ProcedureWALFile log, IOException e) {
+485  if (corruptedLogs == null) {
+486corruptedLogs = new 
HashSet<>();
+487  }
+488  corruptedLogs.add(log);
+489  // TODO: sideline corrupted 
log
+490}
+491  });
+492  // if we fail when loading, we 
should prevent persisting the storeTracker later in the stop
+493  // method. As it may happen that, 
we have finished constructing the modified and deleted bits,
+494  // but before we call 
resetModified, we fail, then if we persist the storeTracker then when
+495  // restarting, we will consider 
that all procedures have been included in this file and delete
+496  // all the previous files. 
Obviously this not correct. So here we will only set loading to
+497  // false when we successfully 
loaded all the procedures, and when closing we will skip
+498  // persisting the store tracker. 
And also, this will prevent the sync thread to do
+499  // periodicRoll, where we may also 
clean old logs.
+500  loading.set(false);
+501  // try to cleanup inactive wals and 
complete the operation
+502  buildHoldingCleanupTracker();
+503  tryCleanupLogsOnLoad();
+504} finally {
+505  lock.unlock();
+506}
+507  }
 508
-509// the config says to not cleanup 
wals on load.
-510if 
(!conf.getBoolean(EXEC_WAL_CLEANUP_ON_LOAD_CONF_KEY,
-511  
DEFAULT_EXEC_WAL_CLEANUP_

hbase-site git commit: INFRA-10751 Empty commit

2018-11-20 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site c8b83ace9 -> af942ccb6


INFRA-10751 Empty commit


Project: http://git-wip-us.apache.org/repos/asf/hbase-site/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase-site/commit/af942ccb
Tree: http://git-wip-us.apache.org/repos/asf/hbase-site/tree/af942ccb
Diff: http://git-wip-us.apache.org/repos/asf/hbase-site/diff/af942ccb

Branch: refs/heads/asf-site
Commit: af942ccb6b62893f47d23504c26edb3aac6ecd9e
Parents: c8b83ac
Author: jenkins 
Authored: Tue Nov 20 14:52:55 2018 +
Committer: jenkins 
Committed: Tue Nov 20 14:52:55 2018 +

--

--




[04/11] hbase-site git commit: Published site at 405bf5e6383a09f435baadbac6c389e9f6c43ac6.

2018-11-20 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c8b83ace/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.html b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.html
index ab175b6..72a3459 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.html
@@ -456,969 +456,979 @@
 448lock.lock();
 449try {
 450  if (logs.isEmpty()) {
-451throw new 
RuntimeException("recoverLease() must be called before loading data");
+451throw new 
IllegalStateException("recoverLease() must be called before loading data");
 452  }
 453
 454  // Nothing to do, If we have only 
the current log.
 455  if (logs.size() == 1) {
 456LOG.debug("No state logs to 
replay.");
 457loader.setMaxProcId(0);
-458return;
-459  }
-460
-461  // Load the old logs
-462  Iterator it 
= logs.descendingIterator();
-463  it.next(); // Skip the current 
log
-464
-465  ProcedureWALFormat.load(it, 
storeTracker, new ProcedureWALFormat.Loader() {
-466
-467@Override
-468public void setMaxProcId(long 
maxProcId) {
-469  
loader.setMaxProcId(maxProcId);
-470}
-471
-472@Override
-473public void 
load(ProcedureIterator procIter) throws IOException {
-474  loader.load(procIter);
-475}
-476
-477@Override
-478public void 
handleCorrupted(ProcedureIterator procIter) throws IOException {
-479  
loader.handleCorrupted(procIter);
-480}
-481
-482@Override
-483public void 
markCorruptedWAL(ProcedureWALFile log, IOException e) {
-484  if (corruptedLogs == null) {
-485corruptedLogs = new 
HashSet<>();
-486  }
-487  corruptedLogs.add(log);
-488  // TODO: sideline corrupted 
log
-489}
-490  });
-491} finally {
-492  try {
-493// try to cleanup inactive wals 
and complete the operation
-494buildHoldingCleanupTracker();
-495tryCleanupLogsOnLoad();
-496loading.set(false);
-497  } finally {
-498lock.unlock();
-499  }
-500}
-501  }
-502
-503  private void tryCleanupLogsOnLoad() {
-504// nothing to cleanup.
-505if (logs.size() <= 1) {
-506  return;
-507}
+458loading.set(false);
+459return;
+460  }
+461
+462  // Load the old logs
+463  Iterator it 
= logs.descendingIterator();
+464  it.next(); // Skip the current 
log
+465
+466  ProcedureWALFormat.load(it, 
storeTracker, new ProcedureWALFormat.Loader() {
+467
+468@Override
+469public void setMaxProcId(long 
maxProcId) {
+470  
loader.setMaxProcId(maxProcId);
+471}
+472
+473@Override
+474public void 
load(ProcedureIterator procIter) throws IOException {
+475  loader.load(procIter);
+476}
+477
+478@Override
+479public void 
handleCorrupted(ProcedureIterator procIter) throws IOException {
+480  
loader.handleCorrupted(procIter);
+481}
+482
+483@Override
+484public void 
markCorruptedWAL(ProcedureWALFile log, IOException e) {
+485  if (corruptedLogs == null) {
+486corruptedLogs = new 
HashSet<>();
+487  }
+488  corruptedLogs.add(log);
+489  // TODO: sideline corrupted 
log
+490}
+491  });
+492  // if we fail when loading, we 
should prevent persisting the storeTracker later in the stop
+493  // method. As it may happen that, 
we have finished constructing the modified and deleted bits,
+494  // but before we call 
resetModified, we fail, then if we persist the storeTracker then when
+495  // restarting, we will consider 
that all procedures have been included in this file and delete
+496  // all the previous files. 
Obviously this not correct. So here we will only set loading to
+497  // false when we successfully 
loaded all the procedures, and when closing we will skip
+498  // persisting the store tracker. 
And also, this will prevent the sync thread to do
+499  // periodicRoll, where we may also 
clean old logs.
+500  loading.set(false);
+501  // try to cleanup inactive wals and 
complete the operation
+502  buildHoldingCleanupTracker();
+503  tryCleanupLogsOnLoad();
+504} finally {
+505  lock.unlock();
+506}
+507  }
 508
-509// the config says to not cleanup 
wals on load.
-510if 
(!conf.getBoolean(EXEC_WAL_CLEANUP_ON_LOAD_CONF_KEY,
-511  
DEFAULT_EXEC_WAL_CLEANUP_ON_LOAD_CONF_KEY)) {
-512  LOG.debug("WALs cleanup on lo

[02/11] hbase-site git commit: Published site at 405bf5e6383a09f435baadbac6c389e9f6c43ac6.

2018-11-20 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c8b83ace/testdevapidocs/org/apache/hadoop/hbase/master/TestLoadProcedureError.TestProcedure.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/master/TestLoadProcedureError.TestProcedure.html b/testdevapidocs/org/apache/hadoop/hbase/master/TestLoadProcedureError.TestProcedure.html
new file mode 100644
index 000..11faf99
--- /dev/null
+++ b/testdevapidocs/org/apache/hadoop/hbase/master/TestLoadProcedureError.TestProcedure.html
@@ -0,0 +1,421 @@
+org.apache.hadoop.hbase.master
+Class 
TestLoadProcedureError.TestProcedure
+
+
+
+https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">java.lang.Object
+
+
+org.apache.hadoop.hbase.procedure2.Procedure
+
+
+org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility.NoopProcedure
+
+
+org.apache.hadoop.hbase.master.TestLoadProcedureError.TestProcedure
+
+
+
+
+
+
+
+
+
+
+
+All Implemented Interfaces:
+https://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true";
 title="class or interface in 
java.lang">Comparable>,
 org.apache.hadoop.hbase.master.procedure.TableProcedureInterface
+
+
+Enclosing class:
+TestLoadProcedureError
+
+
+
+public static final class TestLoadProcedureError.TestProcedure
+extends ProcedureTestingUtility.NoopProcedure
+implements 
org.apache.hadoop.hbase.master.procedure.TableProcedureInterface
+
+
+
+
+
+
+
+
+
+
+
+Nested Class Summary
+
+
+
+
+Nested classes/interfaces inherited from 
class org.apache.hadoop.hbase.procedure2.Procedure
+org.apache.hadoop.hbase.procedure2.Procedure.LockState
+
+
+
+
+
+Nested classes/interfaces inherited from 
interface org.apache.hadoop.hbase.master.procedure.TableProcedureInterface
+org.apache.hadoop.hbase.master.procedure.TableProcedureInterface.TableOperationType
+
+
+
+
+
+
+
+
+Field Summary
+
+
+
+
+Fields inherited from 
class org.apache.hadoop.hbase.procedure2.Procedure
+NO_PROC_ID, NO_TIMEOUT
+
+
+
+
+
+
+
+
+Constructor Summary
+
+Constructors 
+
+Constructor and Description
+
+
+TestProcedure() 
+
+
+
+
+
+
+
+
+
+Method Summary
+
+All Methods Instance Methods Concrete Methods 
+
+Modifier and Type
+Method and Description
+
+
+protected void
+afterReplay(org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv env) 
+
+
+protected 
org.apache.hadoop.hbase.procedure2.Procedure[]
+execute(org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv env) 
+
+
+org.apache.hadoop.hbase.TableName
+getTableName() 
+
+
+org.apache.hadoop.hbase.master.procedure.TableProcedureInterface.TableOperationType
+getTableOperationType() 
+
+
+protected boolean
+setTimeoutFailure(org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv env) 
+
+
+
+
+
+
+Methods inherited from class org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility.NoopProcedure
+abort,
 deserializeStateData,
 rollback,
 serializeStateData
+
+
+
+
+
+Methods inherited from 
class org.apache.hadoop.hbase.procedure2.Procedure
+acquireLock, addStackIndex, beforeReplay, bypass, compareTo, 
completionCleanup, doExecute, doRollback, elapsedTime, getChildrenLatch, 
getException, getLastUpdate, getNonceKey, getOwner, getParentProcId, 
getProcedureMetrics, getProcId, getProcIdHashCode, getProcName, getResult, 
getRootProcedureId, getRootProcId, getStackIndexes, getState, getSubmittedTime, 
getTimeout, getTimeoutTimestamp, has

[10/11] hbase-site git commit: Published site at 405bf5e6383a09f435baadbac6c389e9f6c43ac6.

2018-11-20 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c8b83ace/devapidocs/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.html b/devapidocs/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.html
index a2f36de..f6f 100644
--- a/devapidocs/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.html
+++ b/devapidocs/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.html
@@ -1489,7 +1489,7 @@ extends 
 
 WALS_PATH_FILTER
-private static final org.apache.hadoop.fs.PathFilter WALS_PATH_FILTER
+private static final org.apache.hadoop.fs.PathFilter WALS_PATH_FILTER
 
 
 
@@ -1498,7 +1498,7 @@ extends 
 
 FILE_STATUS_ID_COMPARATOR
-private static final https://docs.oracle.com/javase/8/docs/api/java/util/Comparator.html?is-external=true";
 title="class or interface in 
java.util">Comparator FILE_STATUS_ID_COMPARATOR
+private static final https://docs.oracle.com/javase/8/docs/api/java/util/Comparator.html?is-external=true";
 title="class or interface in 
java.util">Comparator FILE_STATUS_ID_COMPARATOR
 
 
 
@@ -1685,7 +1685,7 @@ extends 
 
 tryCleanupLogsOnLoad
-private void tryCleanupLogsOnLoad()
+private void tryCleanupLogsOnLoad()
 
 
 
@@ -1694,7 +1694,7 @@ extends 
 
 insert
-public void insert(Procedure proc,
+public void insert(Procedure proc,
Procedure[] subprocs)
 Description copied from 
interface: ProcedureStore
 When a procedure is submitted to the executor insert(proc, 
null) will be called.
@@ -1716,7 +1716,7 @@ extends 
 
 insert
-public void insert(Procedure[] procs)
+public void insert(Procedure[] procs)
 Description copied from 
interface: ProcedureStore
 Serialize a set of new procedures.
  These procedures are freshly submitted to the executor and each procedure
@@ -1733,7 +1733,7 @@ extends 
 
 update
-public void update(Procedure proc)
+public void update(Procedure proc)
 Description copied from 
interface: ProcedureStore
 The specified procedure was executed,
  and the new state should be written to the store.
@@ -1749,7 +1749,7 @@ extends 
 
 delete
-public void delete(long procId)
+public void delete(long procId)
 Description copied from 
interface: ProcedureStore
 The specified procId was removed from the executor,
  due to completion, abort or failure.
@@ -1766,7 +1766,7 @@ extends 
 
 delete
-public void delete(Procedure proc,
+public void delete(Procedure proc,
long[] subProcIds)
 Description copied from 
interface: ProcedureStore
 The parent procedure completed.
@@ -1784,7 +1784,7 @@ extends 
 
 delete
-public void delete(long[] procIds,
+public void delete(long[] procIds,
int offset,
int count)
 Description copied from 
interface: ProcedureStore
@@ -1805,7 +1805,7 @@ extends 
 
 delete
-private void delete(long[] procIds)
+private void delete(long[] procIds)
 
 
 
@@ -1814,7 +1814,7 @@ extends 
 
 acquireSlot
-private ByteSlot acquireSlot()
+private ByteSlot acquireSlot()
 
 
 
@@ -1823,7 +1823,7 @@ extends 
 
 releaseSlot
-private void releaseSlot(ByteSlot slot)
+private void releaseSlot(ByteSlot slot)
 
 
 
@@ -1832,7 +1832,7 @@ extends 
 
 pushData
-private long pushData(WALProcedureStore.PushType type,
+private long pushData(WALProcedureStore.PushType type,
   ByteSlot slot,
   long procId,
   long[] subProcIds)
@@ -1844,7 +1844,7 @@ extends 
 
 updateStoreTracker
-private void updateStoreTracker(WALProcedureStore.PushType type,
+private void updateStoreTracker(WALProcedureStore.PushType type,
 long procId,
 long[] subProcIds)
 
@@ -1855,7 +1855,7 @@ extends 
 
 isSyncAborted
-private boolean isSyncAborted()
+private boolean isSyncAborted()
 
 
 
@@ -1864,7 +1864,7 @@ extends 
 
 syncLoop
-private void syncLoop()
+private void syncLoop()
throws https://docs.oracle.com/javase/8/docs/api/java/lang/Throwable.html?is-external=true";
 title="class or interface in java.lang">Throwable
 
 Throws:
@@ -1878,7 +1878,7 @@ extends 
 
 getSyncMetrics
-public https://docs.oracle.com/javase/8/docs/api/java/util/ArrayList.html?is-external=true";
 title="class or interface in java.util">ArrayList getSyncMetrics()
+public https://docs.oracle.com/javase/8/docs/api/java/util/ArrayList.html?is-external=true";
 title="class or interface in java.util">ArrayList getSyncMetrics()
 
 
 
@@ -1887,7 +1887,7 @@ extends 
 
 syncSlots
-private long syncSlots()
+private long syncSlots()
 throws https://docs.oracle.com/javase/8/docs/api/java/lang/Throwable.html?is-external=true";
 title="class or interface in java.

[03/11] hbase-site git commit: Published site at 405bf5e6383a09f435baadbac6c389e9f6c43ac6.

2018-11-20 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c8b83ace/downloads.html
--
diff --git a/downloads.html b/downloads.html
index 6e87490..3d1fa93 100644
--- a/downloads.html
+++ b/downloads.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase – Apache HBase Downloads
 
@@ -461,7 +461,7 @@ under the License. -->
 https://www.apache.org/";>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-11-19
+  Last Published: 
2018-11-20
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c8b83ace/export_control.html
--
diff --git a/export_control.html b/export_control.html
index 71012cf..f6570f0 100644
--- a/export_control.html
+++ b/export_control.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase – 
   Export Control
@@ -341,7 +341,7 @@ for more details.
 https://www.apache.org/";>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-11-19
+  Last Published: 
2018-11-20
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c8b83ace/index.html
--
diff --git a/index.html b/index.html
index 8e04166..048ec54 100644
--- a/index.html
+++ b/index.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase – Apache HBase™ Home
 
@@ -421,7 +421,7 @@ Apache HBase is an open-source, distributed, versioned, 
non-relational database
 https://www.apache.org/";>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-11-19
+  Last Published: 
2018-11-20
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c8b83ace/integration.html
--
diff --git a/integration.html b/integration.html
index 212ba4d..fe14463 100644
--- a/integration.html
+++ b/integration.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase – CI Management
 
@@ -301,7 +301,7 @@
 https://www.apache.org/";>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-11-19
+  Last Published: 
2018-11-20
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c8b83ace/issue-tracking.html
--
diff --git a/issue-tracking.html b/issue-tracking.html
index 452ee46..ca30679 100644
--- a/issue-tracking.html
+++ b/issue-tracking.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase – Issue Management
 
@@ -298,7 +298,7 @@
 https://www.apache.org/";>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-11-19
+  Last Published: 
2018-11-20
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c8b83ace/license.html
--
diff --git a/license.html b/license.html
index 9175f67..874a038 100644
--- a/license.html
+++ b/license.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase – Project Licenses
 
@@ -501,7 +501,7 @@
 https://www.apache.org/";>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-11-19
+  Last Published: 
2018-11-20
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c8b83ace/mail-lists.html
--
diff --git a/mail-lists.html b/mail-lists.html
index 3687bf6..3214b3e 100644
--- a/mail-lists.html
+++ b/mail-lists.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase – Project Mailing Lists
 
@@ -351,7 +351,7 @@
 https://www.apache.org/";>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-11-19
+  Last Published: 
2018-11-20
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c8b83ace/metrics.html
--
diff --git a/metrics.html b/metrics.html
index 0cdbfd0..c997bd0 100644
--- a/metrics.html
+++ b/m

[06/11] hbase-site git commit: Published site at 405bf5e6383a09f435baadbac6c389e9f6c43ac6.

2018-11-20 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c8b83ace/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.PushType.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.PushType.html b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.PushType.html
index ab175b6..72a3459 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.PushType.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.PushType.html
@@ -456,969 +456,979 @@
 448lock.lock();
 449try {
 450  if (logs.isEmpty()) {
-451throw new 
RuntimeException("recoverLease() must be called before loading data");
+451throw new 
IllegalStateException("recoverLease() must be called before loading data");
 452  }
 453
 454  // Nothing to do, If we have only 
the current log.
 455  if (logs.size() == 1) {
 456LOG.debug("No state logs to 
replay.");
 457loader.setMaxProcId(0);
-458return;
-459  }
-460
-461  // Load the old logs
-462  Iterator it 
= logs.descendingIterator();
-463  it.next(); // Skip the current 
log
-464
-465  ProcedureWALFormat.load(it, 
storeTracker, new ProcedureWALFormat.Loader() {
-466
-467@Override
-468public void setMaxProcId(long 
maxProcId) {
-469  
loader.setMaxProcId(maxProcId);
-470}
-471
-472@Override
-473public void 
load(ProcedureIterator procIter) throws IOException {
-474  loader.load(procIter);
-475}
-476
-477@Override
-478public void 
handleCorrupted(ProcedureIterator procIter) throws IOException {
-479  
loader.handleCorrupted(procIter);
-480}
-481
-482@Override
-483public void 
markCorruptedWAL(ProcedureWALFile log, IOException e) {
-484  if (corruptedLogs == null) {
-485corruptedLogs = new 
HashSet<>();
-486  }
-487  corruptedLogs.add(log);
-488  // TODO: sideline corrupted 
log
-489}
-490  });
-491} finally {
-492  try {
-493// try to cleanup inactive wals 
and complete the operation
-494buildHoldingCleanupTracker();
-495tryCleanupLogsOnLoad();
-496loading.set(false);
-497  } finally {
-498lock.unlock();
-499  }
-500}
-501  }
-502
-503  private void tryCleanupLogsOnLoad() {
-504// nothing to cleanup.
-505if (logs.size() <= 1) {
-506  return;
-507}
+458loading.set(false);
+459return;
+460  }
+461
+462  // Load the old logs
+463  Iterator it 
= logs.descendingIterator();
+464  it.next(); // Skip the current 
log
+465
+466  ProcedureWALFormat.load(it, 
storeTracker, new ProcedureWALFormat.Loader() {
+467
+468@Override
+469public void setMaxProcId(long 
maxProcId) {
+470  
loader.setMaxProcId(maxProcId);
+471}
+472
+473@Override
+474public void 
load(ProcedureIterator procIter) throws IOException {
+475  loader.load(procIter);
+476}
+477
+478@Override
+479public void 
handleCorrupted(ProcedureIterator procIter) throws IOException {
+480  
loader.handleCorrupted(procIter);
+481}
+482
+483@Override
+484public void 
markCorruptedWAL(ProcedureWALFile log, IOException e) {
+485  if (corruptedLogs == null) {
+486corruptedLogs = new 
HashSet<>();
+487  }
+488  corruptedLogs.add(log);
+489  // TODO: sideline corrupted 
log
+490}
+491  });
+492  // if we fail when loading, we 
should prevent persisting the storeTracker later in the stop
+493  // method. As it may happen that, 
we have finished constructing the modified and deleted bits,
+494  // but before we call 
resetModified, we fail, then if we persist the storeTracker then when
+495  // restarting, we will consider 
that all procedures have been included in this file and delete
+496  // all the previous files. 
Obviously this not correct. So here we will only set loading to
+497  // false when we successfully 
loaded all the procedures, and when closing we will skip
+498  // persisting the store tracker. 
And also, this will prevent the sync thread to do
+499  // periodicRoll, where we may also 
clean old logs.
+500  loading.set(false);
+501  // try to cleanup inactive wals and 
complete the operation
+502  buildHoldingCleanupTracker();
+503  tryCleanupLogsOnLoad();
+504} finally {
+505  lock.unlock();
+506}
+507  }
 508
-509// the config says to not cleanup 
wals on load.
-510if 
(!conf.getBoolean(EXEC_WAL_CLEANUP_ON_LOAD_CONF_KEY,
-511  
DEFAULT_EXEC_WAL_CLEANUP_ON_LOAD_CONF_KE

[01/11] hbase-site git commit: Published site at 405bf5e6383a09f435baadbac6c389e9f6c43ac6.

2018-11-20 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site c90f2bbe9 -> c8b83ace9


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c8b83ace/testdevapidocs/org/apache/hadoop/hbase/procedure2/package-tree.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/procedure2/package-tree.html b/testdevapidocs/org/apache/hadoop/hbase/procedure2/package-tree.html
index 1fc7c25..46cc200 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/procedure2/package-tree.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/procedure2/package-tree.html
@@ -234,10 +234,10 @@
 
 java.lang.https://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true";
 title="class or interface in java.lang">Enum (implements java.lang.https://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true";
 title="class or interface in java.lang">Comparable, java.io.https://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true";
 title="class or interface in java.io">Serializable)
 
-org.apache.hadoop.hbase.procedure2.TestProcedureBypass.StuckStateMachineState
-org.apache.hadoop.hbase.procedure2.TestProcedureRecovery.TestStateMachineProcedure.State
-org.apache.hadoop.hbase.procedure2.TestYieldProcedures.TestStateMachineProcedure.State
 org.apache.hadoop.hbase.procedure2.TestStateMachineProcedure.TestSMProcedureState
+org.apache.hadoop.hbase.procedure2.TestYieldProcedures.TestStateMachineProcedure.State
+org.apache.hadoop.hbase.procedure2.TestProcedureRecovery.TestStateMachineProcedure.State
+org.apache.hadoop.hbase.procedure2.TestProcedureBypass.StuckStateMachineState
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c8b83ace/testdevapidocs/org/apache/hadoop/hbase/procedure2/package-use.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/procedure2/package-use.html b/testdevapidocs/org/apache/hadoop/hbase/procedure2/package-use.html
index 72cc2b4..eda5b71 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/procedure2/package-use.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/procedure2/package-use.html
@@ -87,14 +87,18 @@
  
 
 
-org.apache.hadoop.hbase.master.procedure
+org.apache.hadoop.hbase.master
  
 
 
-org.apache.hadoop.hbase.procedure2
+org.apache.hadoop.hbase.master.procedure
  
 
 
+org.apache.hadoop.hbase.procedure2
+ 
+
+
 org.apache.hadoop.hbase.procedure2.store.wal
  
 
@@ -116,6 +120,21 @@
 
 
 
+
+
+
+
+Classes in org.apache.hadoop.hbase.procedure2
 used by org.apache.hadoop.hbase.master 
+
+Class and Description
+
+
+
+ProcedureTestingUtility.NoopProcedure 
+
+
+
+
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c8b83ace/testdevapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html b/testdevapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
index 5de4a37..f85d0ea 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
@@ -701,10 +701,10 @@
 
 java.lang.https://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true";
 title="class or interface in java.lang">Enum (implements java.lang.https://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true";
 title="class or interface in java.lang">Comparable, java.io.https://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true";
 title="class or interface in java.io">Serializable)
 
-org.apache.hadoop.hbase.regionserver.DataBlockEncodingTool.Manipulation
 org.apache.hadoop.hbase.regionserver.TestMultiLogThreshold.ActionType
-org.apache.hadoop.hbase.regionserver.TestCacheOnWriteInSchema.CacheOnWriteType
 org.apache.hadoop.hbase.regionserver.TestAtomicOperation.TestStep
+org.apache.hadoop.hbase.regionserver.TestCacheOnWriteInSchema.CacheOnWriteType
+org.apache.hadoop.hbase.regionserver.DataBlockEncodingTool.Manipulation
 org.apache.hadoop.hbase.regionserver.TestRegionServerReadRequestMetrics.Metric
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c8b83ace/testdevapidocs/overview-tree.html
--
diff --git a/testdevapidocs/overview-tree.html b/testdevapidocs/overview-tree.html
index b857403..b891855 100644
--- a/testdevapidocs/overview-tree.html
+++ b/testdevapidocs/overview-tree.html
@@ -1776,6 +1776,7 @@
 org.apache.hadoop.hbase.procedure2.TestForceUpdateProcedure.ParentProcedure
 org.apache.hadoop.hbase.procedure2.TestForceUpdateProcedure.WaitingProcedure
 org.apache.hadoop.hbase.client.TestHbck.SuspendProcedure (implements 
org.apache.hadoop.hbase.master.procedure.TableProcedureInterface)
+org.apache.hadoop.hbase.master.TestLoa

[11/11] hbase-site git commit: Published site at 405bf5e6383a09f435baadbac6c389e9f6c43ac6.

2018-11-20 Thread git-site-role
Published site at 405bf5e6383a09f435baadbac6c389e9f6c43ac6.


Project: http://git-wip-us.apache.org/repos/asf/hbase-site/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase-site/commit/c8b83ace
Tree: http://git-wip-us.apache.org/repos/asf/hbase-site/tree/c8b83ace
Diff: http://git-wip-us.apache.org/repos/asf/hbase-site/diff/c8b83ace

Branch: refs/heads/asf-site
Commit: c8b83ace98bd4d21e72abed5fa0cb09f7bd38d2b
Parents: c90f2bb
Author: jenkins 
Authored: Tue Nov 20 14:52:40 2018 +
Committer: jenkins 
Committed: Tue Nov 20 14:52:40 2018 +

--
 acid-semantics.html |4 +-
 apache_hbase_reference_guide.pdf|4 +-
 book.html   |2 +-
 bulk-loads.html |4 +-
 checkstyle-aggregate.html   |   18 +-
 checkstyle.rss  |   16 +-
 coc.html|4 +-
 dependencies.html   |4 +-
 dependency-convergence.html |4 +-
 dependency-info.html|4 +-
 dependency-management.html  |4 +-
 devapidocs/constant-values.html |4 +-
 .../procedure2/store/ProcedureStoreTracker.html |   30 +-
 .../store/wal/ProcedureWALFormat.html   |   22 +-
 .../store/wal/WALProcedureStore.PushType.html   |   12 +-
 .../procedure2/store/wal/WALProcedureStore.html |   92 +-
 .../org/apache/hadoop/hbase/Version.html|4 +-
 .../ProcedureStoreTracker.DeleteState.html  |  413 ++--
 .../procedure2/store/ProcedureStoreTracker.html |  413 ++--
 ...cedureWALFormat.InvalidWALDataException.html |  345 ++--
 .../store/wal/ProcedureWALFormat.Loader.html|  345 ++--
 .../store/wal/ProcedureWALFormat.html   |  345 ++--
 .../wal/WALProcedureStore.LeaseRecovery.html| 1876 +-
 .../store/wal/WALProcedureStore.PushType.html   | 1876 +-
 .../wal/WALProcedureStore.SyncMetrics.html  | 1876 +-
 .../procedure2/store/wal/WALProcedureStore.html | 1876 +-
 downloads.html  |4 +-
 export_control.html |4 +-
 index.html  |4 +-
 integration.html|4 +-
 issue-tracking.html |4 +-
 license.html|4 +-
 mail-lists.html |4 +-
 metrics.html|4 +-
 old_news.html   |4 +-
 plugin-management.html  |4 +-
 plugins.html|4 +-
 poweredbyhbase.html |4 +-
 project-info.html   |4 +-
 project-reports.html|4 +-
 project-summary.html|4 +-
 pseudo-distributed.html |4 +-
 replication.html|4 +-
 resources.html  |4 +-
 source-repository.html  |4 +-
 sponsors.html   |4 +-
 supportingprojects.html |4 +-
 team-list.html  |4 +-
 testdevapidocs/allclasses-frame.html|2 +
 testdevapidocs/allclasses-noframe.html  |2 +
 testdevapidocs/index-all.html   |   40 +
 .../hadoop/hbase/backup/package-tree.html   |2 +-
 .../hbase/class-use/HBaseClassTestRule.html |   16 +-
 .../hbase/class-use/HBaseTestingUtility.html|6 +-
 .../hadoop/hbase/io/hfile/package-tree.html |2 +-
 .../hbase/master/TestHMasterRPCException.html   |4 +-
 .../TestLoadProcedureError.TestProcedure.html   |  421 
 .../hbase/master/TestLoadProcedureError.html|  447 +
 .../apache/hadoop/hbase/master/TestMaster.html  |4 +-
 .../TestLoadProcedureError.TestProcedure.html   |  125 ++
 .../class-use/TestLoadProcedureError.html   |  125 ++
 .../hadoop/hbase/master/package-frame.html  |2 +
 .../hadoop/hbase/master/package-summary.html|   10 +
 .../hadoop/hbase/master/package-tree.html   |6 +
 .../org/apache/hadoop/hbase/package-tree.html   |   12 +-
 .../hadoop/hbase/procedure/package-tree.html|8 +-
 .../ProcedureTestingUtility.NoopProcedure.html  |2 +-
 .../ProcedureTestingUtility.NoopProcedure.html  |   24 +-
 .../hadoop/hbase/procedure2/package-tree.html   |6 +-
 .../hadoop/hbase/procedure2/package-use.html|   23 +-
 .../hadoop/hbase/regionserver/package-tree.html |4 +-
 testdevapidocs/overview-tree.html   |2 +
 .../TestLoadProcedureError.TestProcedure.html   |  

[09/11] hbase-site git commit: Published site at 405bf5e6383a09f435baadbac6c389e9f6c43ac6.

2018-11-20 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c8b83ace/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.html b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.html
index 5fc97ee..2cc0fc9 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.html
@@ -282,212 +282,213 @@
 274this.keepDeletes = false;
 275this.partial = false;
 276this.map.clear();
-277resetModified();
-278  }
-279
-280  public boolean isModified(long procId) 
{
-281final Map.Entry entry = map.floorEntry(procId);
-282return entry != null && 
entry.getValue().contains(procId) &&
-283  
entry.getValue().isModified(procId);
-284  }
-285
-286  /**
-287   * If {@link #partial} is false, 
returns state from the bitmap. If no state is found for
-288   * {@code procId}, returns YES.
-289   * If partial is true, tracker doesn't 
have complete view of system state, so it returns MAYBE
-290   * if there is no update for the 
procedure or if it doesn't have a state in bitmap. Otherwise,
-291   * returns state from the bitmap.
-292   */
-293  public DeleteState isDeleted(long 
procId) {
-294Map.Entry 
entry = map.floorEntry(procId);
-295if (entry != null && 
entry.getValue().contains(procId)) {
-296  BitSetNode node = 
entry.getValue();
-297  DeleteState state = 
node.isDeleted(procId);
-298  return partial && 
!node.isModified(procId) ? DeleteState.MAYBE : state;
-299}
-300return partial ? DeleteState.MAYBE : 
DeleteState.YES;
-301  }
-302
-303  public long getActiveMinProcId() {
-304Map.Entry 
entry = map.firstEntry();
-305return entry == null ? 
Procedure.NO_PROC_ID : entry.getValue().getActiveMinProcId();
-306  }
-307
-308  public void setKeepDeletes(boolean 
keepDeletes) {
-309this.keepDeletes = keepDeletes;
-310// If not to keep deletes, remove the 
BitSetNodes which are empty (i.e. contains ids of deleted
-311// procedures).
-312if (!keepDeletes) {
-313  Iterator> it = map.entrySet().iterator();
-314  while (it.hasNext()) {
-315Map.Entry 
entry = it.next();
-316if (entry.getValue().isEmpty()) 
{
-317  it.remove();
-318}
-319  }
-320}
-321  }
-322
-323  public boolean isPartial() {
-324return partial;
-325  }
-326
-327  public void setPartialFlag(boolean 
isPartial) {
-328if (this.partial && 
!isPartial) {
-329  for (Map.Entry entry : map.entrySet()) {
-330
entry.getValue().unsetPartialFlag();
-331  }
-332}
-333this.partial = isPartial;
-334  }
-335
-336  /**
-337   * @return true, if no procedure is 
active, else false.
-338   */
-339  public boolean isEmpty() {
-340for (Map.Entry entry : map.entrySet()) {
-341  if (!entry.getValue().isEmpty()) 
{
-342return false;
-343  }
-344}
-345return true;
-346  }
-347
-348  /**
-349   * @return true if all procedure was 
modified or deleted since last call to
-350   * {@link #resetModified()}.
-351   */
-352  public boolean isAllModified() {
-353for (Map.Entry entry : map.entrySet()) {
-354  if 
(!entry.getValue().isAllModified()) {
-355return false;
-356  }
-357}
-358return true;
-359  }
-360
-361  /**
-362   * Will be used when there are too many 
proc wal files. We will rewrite the states of the active
-363   * procedures in the oldest proc wal 
file so that we can delete it.
-364   * @return all the active procedure ids 
in this tracker.
-365   */
-366  public long[] getAllActiveProcIds() {
-367return 
map.values().stream().map(BitSetNode::getActiveProcIds).filter(p -> p.length 
> 0)
-368  
.flatMapToLong(LongStream::of).toArray();
-369  }
-370
-371  /**
-372   * Clears the list of updated procedure 
ids. This doesn't affect global list of active
-373   * procedure ids.
-374   */
-375  public void resetModified() {
-376for (Map.Entry entry : map.entrySet()) {
-377  entry.getValue().resetModified();
-378}
-379minModifiedProcId = Long.MAX_VALUE;
-380maxModifiedProcId = Long.MIN_VALUE;
-381  }
-382
-383  private BitSetNode getOrCreateNode(long 
procId) {
-384// If procId can fit in left node 
(directly or by growing it)
-385BitSetNode leftNode = null;
-386boolean leftCanGrow = false;
-387Map.Entry 
leftEntry = map.floorEntry(procId);
-388if (leftEntry != null) {
-389  leftNode = leftEntry.getValue();
-390  if (leftNode.contains(procId)) {
-391return