hbase git commit: HBASE-16804 JavaHBaseContext.streamBulkGet is void but should be JavaDStream (Igor Yurinok)
Repository: hbase Updated Branches: refs/heads/master 0f21c41ed -> 83fc59d5c HBASE-16804 JavaHBaseContext.streamBulkGet is void but should be JavaDStream (Igor Yurinok) Project: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/83fc59d5 Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/83fc59d5 Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/83fc59d5 Branch: refs/heads/master Commit: 83fc59d5c98e4847eda01d08510cd3c0ee9cfa05 Parents: 0f21c41 Author: tedyuAuthored: Mon Oct 10 19:34:21 2016 -0700 Committer: tedyu Committed: Mon Oct 10 19:34:21 2016 -0700 -- .../scala/org/apache/hadoop/hbase/spark/JavaHBaseContext.scala | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/83fc59d5/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/JavaHBaseContext.scala -- diff --git a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/JavaHBaseContext.scala b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/JavaHBaseContext.scala index 7deb5b8..a99e0e3 100644 --- a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/JavaHBaseContext.scala +++ b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/JavaHBaseContext.scala @@ -281,7 +281,7 @@ class JavaHBaseContext(@transient jsc: JavaSparkContext, batchSize: Integer, javaDStream: JavaDStream[T], makeGet: Function[T, Get], - convertResult: Function[Result, U]) { + convertResult: Function[Result, U]): JavaDStream[U] = { JavaDStream.fromDStream(hbaseContext.streamBulkGet(tableName, batchSize, javaDStream.dstream,
hbase git commit: HBASE-16801 The Append/Increment may return the data from future (Chiaping Tsai)
Repository: hbase Updated Branches: refs/heads/master 7493e79f1 -> 0f21c41ed HBASE-16801 The Append/Increment may return the data from future (Chiaping Tsai) Project: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/0f21c41e Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/0f21c41e Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/0f21c41e Branch: refs/heads/master Commit: 0f21c41eda828eae42dd9bab102988c252b1088a Parents: 7493e79 Author: tedyuAuthored: Mon Oct 10 18:31:31 2016 -0700 Committer: tedyu Committed: Mon Oct 10 18:31:31 2016 -0700 -- .../hbase/regionserver/ServerNonceManager.java | 2 +- .../regionserver/TestServerNonceManager.java| 21 2 files changed, 22 insertions(+), 1 deletion(-) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/0f21c41e/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ServerNonceManager.java -- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ServerNonceManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ServerNonceManager.java index 459b69a..47842fd 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ServerNonceManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ServerNonceManager.java @@ -62,7 +62,7 @@ public class ServerNonceManager { private static final long WAITING_BIT = 4; private static final long ALL_FLAG_BITS = WAITING_BIT | STATE_BITS; -private static long mvcc; +private long mvcc; @Override public String toString() { http://git-wip-us.apache.org/repos/asf/hbase/blob/0f21c41e/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestServerNonceManager.java -- diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestServerNonceManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestServerNonceManager.java index 5efc12c..89e414d 100644 --- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestServerNonceManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestServerNonceManager.java @@ -46,6 +46,27 @@ import org.mockito.stubbing.Answer; public class TestServerNonceManager { @Test + public void testMvcc() throws Exception { +ServerNonceManager nm = createManager(); +final long group = 100; +final long nonce = 1; +final long initMvcc = 999; +assertTrue(nm.startOperation(group, nonce, createStoppable())); +nm.addMvccToOperationContext(group, nonce, initMvcc); +nm.endOperation(group, nonce, true); +assertEquals(initMvcc, nm.getMvccFromOperationContext(group, nonce)); +long newMvcc = initMvcc + 1; +for (long newNonce = nonce + 1; newNonce != (nonce + 5); ++newNonce) { + assertTrue(nm.startOperation(group, newNonce, createStoppable())); + nm.addMvccToOperationContext(group, newNonce, newMvcc); + nm.endOperation(group, newNonce, true); + assertEquals(newMvcc, nm.getMvccFromOperationContext(group, newNonce)); + ++newMvcc; +} +assertEquals(initMvcc, nm.getMvccFromOperationContext(group, nonce)); + } + + @Test public void testNormalStartEnd() throws Exception { final long[] numbers = new long[] { NO_NONCE, 1, 2, Long.MAX_VALUE, Long.MIN_VALUE }; ServerNonceManager nm = createManager();
hbase git commit: HBASE-16788 Guard HFile archiving under a separate lock
Repository: hbase Updated Branches: refs/heads/branch-1.3 3e32164e4 -> 8eea3a577 HBASE-16788 Guard HFile archiving under a separate lock Project: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/8eea3a57 Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/8eea3a57 Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/8eea3a57 Branch: refs/heads/branch-1.3 Commit: 8eea3a5777a25907dcf6486bfeafd8482a072b80 Parents: 3e32164 Author: Gary HelmlingAuthored: Fri Oct 7 10:42:20 2016 -0700 Committer: Gary Helmling Committed: Mon Oct 10 16:11:32 2016 -0700 -- .../hadoop/hbase/regionserver/HStore.java | 54 +++-- .../TestCompactionArchiveConcurrentClose.java | 198 +++ 2 files changed, 236 insertions(+), 16 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/8eea3a57/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java -- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java index 66eaebc..555a191 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java @@ -41,6 +41,7 @@ import java.util.concurrent.ExecutorCompletionService; import java.util.concurrent.Future; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.locks.ReentrantLock; import java.util.concurrent.locks.ReentrantReadWriteLock; import org.apache.commons.logging.Log; @@ -148,6 +149,19 @@ public class HStore implements Store { * - completing a compaction */ final ReentrantReadWriteLock lock = new ReentrantReadWriteLock(); + /** + * Lock specific to archiving compacted store files. This avoids races around + * the combination of retrieving the list of compacted files and moving them to + * the archive directory. 
Since this is usually a background process (other than + * on close), we don't want to handle this with the store write lock, which would + * block readers and degrade performance. + * + * Locked by: + * - CompactedHFilesDispatchHandler via closeAndArchiveCompactedFiles() + * - close() + */ + final ReentrantLock archiveLock = new ReentrantLock(); + private final boolean verifyBulkLoads; private ScanInfo scanInfo; @@ -824,6 +838,7 @@ public class HStore implements Store { @Override public ImmutableCollection close() throws IOException { +this.archiveLock.lock(); this.lock.writeLock().lock(); try { // Clear so metrics doesn't find them. @@ -879,6 +894,7 @@ public class HStore implements Store { return result; } finally { this.lock.writeLock().unlock(); + this.archiveLock.unlock(); } } @@ -2630,26 +2646,32 @@ public class HStore implements Store { } @Override - public void closeAndArchiveCompactedFiles() throws IOException { -lock.readLock().lock(); -Collection copyCompactedfiles = null; + public synchronized void closeAndArchiveCompactedFiles() throws IOException { +// ensure other threads do not attempt to archive the same files on close() +archiveLock.lock(); try { - Collection compactedfiles = - this.getStoreEngine().getStoreFileManager().getCompactedfiles(); - if (compactedfiles != null && compactedfiles.size() != 0) { -// Do a copy under read lock -copyCompactedfiles = new ArrayList(compactedfiles); - } else { -if (LOG.isTraceEnabled()) { - LOG.trace("No compacted files to archive"); - return; + lock.readLock().lock(); + Collection copyCompactedfiles = null; + try { +Collection compactedfiles = +this.getStoreEngine().getStoreFileManager().getCompactedfiles(); +if (compactedfiles != null && compactedfiles.size() != 0) { + // Do a copy under read lock + copyCompactedfiles = new ArrayList(compactedfiles); +} else { + if (LOG.isTraceEnabled()) { +LOG.trace("No compacted files to archive"); +return; + } } + } finally { +lock.readLock().unlock(); + } + if 
(copyCompactedfiles != null && !copyCompactedfiles.isEmpty()) { +removeCompactedfiles(copyCompactedfiles); } } finally { - lock.readLock().unlock(); -} -if (copyCompactedfiles != null && !copyCompactedfiles.isEmpty()) { - removeCompactedfiles(copyCompactedfiles); + archiveLock.unlock(); } }
hbase git commit: HBASE-16788 Guard HFile archiving under a separate lock
Repository: hbase Updated Branches: refs/heads/branch-1 59ca4dad7 -> 89bef67d0 HBASE-16788 Guard HFile archiving under a separate lock Project: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/89bef67d Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/89bef67d Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/89bef67d Branch: refs/heads/branch-1 Commit: 89bef67d0c020662599f682309c47a5ed25c9b32 Parents: 59ca4da Author: Gary HelmlingAuthored: Fri Oct 7 10:42:20 2016 -0700 Committer: Gary Helmling Committed: Mon Oct 10 16:06:55 2016 -0700 -- .../hadoop/hbase/regionserver/HStore.java | 54 +++-- .../TestCompactionArchiveConcurrentClose.java | 198 +++ 2 files changed, 236 insertions(+), 16 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/89bef67d/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java -- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java index 6ee6bb5..74f5a1c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java @@ -41,6 +41,7 @@ import java.util.concurrent.ExecutorCompletionService; import java.util.concurrent.Future; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.locks.ReentrantLock; import java.util.concurrent.locks.ReentrantReadWriteLock; import org.apache.commons.logging.Log; @@ -149,6 +150,19 @@ public class HStore implements Store { * - completing a compaction */ final ReentrantReadWriteLock lock = new ReentrantReadWriteLock(); + /** + * Lock specific to archiving compacted store files. This avoids races around + * the combination of retrieving the list of compacted files and moving them to + * the archive directory. 
Since this is usually a background process (other than + * on close), we don't want to handle this with the store write lock, which would + * block readers and degrade performance. + * + * Locked by: + * - CompactedHFilesDispatchHandler via closeAndArchiveCompactedFiles() + * - close() + */ + final ReentrantLock archiveLock = new ReentrantLock(); + private final boolean verifyBulkLoads; private ScanInfo scanInfo; @@ -835,6 +849,7 @@ public class HStore implements Store { @Override public ImmutableCollection close() throws IOException { +this.archiveLock.lock(); this.lock.writeLock().lock(); try { // Clear so metrics doesn't find them. @@ -890,6 +905,7 @@ public class HStore implements Store { return result; } finally { this.lock.writeLock().unlock(); + this.archiveLock.unlock(); } } @@ -2641,26 +2657,32 @@ public class HStore implements Store { } @Override - public void closeAndArchiveCompactedFiles() throws IOException { -lock.readLock().lock(); -Collection copyCompactedfiles = null; + public synchronized void closeAndArchiveCompactedFiles() throws IOException { +// ensure other threads do not attempt to archive the same files on close() +archiveLock.lock(); try { - Collection compactedfiles = - this.getStoreEngine().getStoreFileManager().getCompactedfiles(); - if (compactedfiles != null && compactedfiles.size() != 0) { -// Do a copy under read lock -copyCompactedfiles = new ArrayList(compactedfiles); - } else { -if (LOG.isTraceEnabled()) { - LOG.trace("No compacted files to archive"); - return; + lock.readLock().lock(); + Collection copyCompactedfiles = null; + try { +Collection compactedfiles = +this.getStoreEngine().getStoreFileManager().getCompactedfiles(); +if (compactedfiles != null && compactedfiles.size() != 0) { + // Do a copy under read lock + copyCompactedfiles = new ArrayList(compactedfiles); +} else { + if (LOG.isTraceEnabled()) { +LOG.trace("No compacted files to archive"); +return; + } } + } finally { +lock.readLock().unlock(); + } + if 
(copyCompactedfiles != null && !copyCompactedfiles.isEmpty()) { +removeCompactedfiles(copyCompactedfiles); } } finally { - lock.readLock().unlock(); -} -if (copyCompactedfiles != null && !copyCompactedfiles.isEmpty()) { - removeCompactedfiles(copyCompactedfiles); + archiveLock.unlock(); } }
hbase git commit: HBASE-16788 Guard HFile archiving under a separate lock
Repository: hbase Updated Branches: refs/heads/master fcef2c02c -> 7493e79f1 HBASE-16788 Guard HFile archiving under a separate lock Project: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/7493e79f Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/7493e79f Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/7493e79f Branch: refs/heads/master Commit: 7493e79f15e0a1217dc50ca4431d6ded07df479f Parents: fcef2c0 Author: Gary HelmlingAuthored: Fri Oct 7 10:42:20 2016 -0700 Committer: Gary Helmling Committed: Mon Oct 10 15:58:12 2016 -0700 -- .../hadoop/hbase/regionserver/HStore.java | 54 +++-- .../TestCompactionArchiveConcurrentClose.java | 198 +++ 2 files changed, 236 insertions(+), 16 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/7493e79f/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java -- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java index e9c05c7..5418138 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java @@ -46,6 +46,7 @@ import java.util.concurrent.ExecutorCompletionService; import java.util.concurrent.Future; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.locks.ReentrantLock; import java.util.concurrent.locks.ReentrantReadWriteLock; import org.apache.commons.logging.Log; @@ -147,6 +148,19 @@ public class HStore implements Store { * - completing a compaction */ final ReentrantReadWriteLock lock = new ReentrantReadWriteLock(); + /** + * Lock specific to archiving compacted store files. This avoids races around + * the combination of retrieving the list of compacted files and moving them to + * the archive directory. 
Since this is usually a background process (other than + * on close), we don't want to handle this with the store write lock, which would + * block readers and degrade performance. + * + * Locked by: + * - CompactedHFilesDispatchHandler via closeAndArchiveCompactedFiles() + * - close() + */ + final ReentrantLock archiveLock = new ReentrantLock(); + private final boolean verifyBulkLoads; private ScanInfo scanInfo; @@ -794,6 +808,7 @@ public class HStore implements Store { @Override public ImmutableCollection close() throws IOException { +this.archiveLock.lock(); this.lock.writeLock().lock(); try { // Clear so metrics doesn't find them. @@ -849,6 +864,7 @@ public class HStore implements Store { return result; } finally { this.lock.writeLock().unlock(); + this.archiveLock.unlock(); } } @@ -2357,26 +2373,32 @@ public class HStore implements Store { } @Override - public void closeAndArchiveCompactedFiles() throws IOException { -lock.readLock().lock(); -Collection copyCompactedfiles = null; + public synchronized void closeAndArchiveCompactedFiles() throws IOException { +// ensure other threads do not attempt to archive the same files on close() +archiveLock.lock(); try { - Collection compactedfiles = - this.getStoreEngine().getStoreFileManager().getCompactedfiles(); - if (compactedfiles != null && compactedfiles.size() != 0) { -// Do a copy under read lock -copyCompactedfiles = new ArrayList(compactedfiles); - } else { -if (LOG.isTraceEnabled()) { - LOG.trace("No compacted files to archive"); - return; + lock.readLock().lock(); + Collection copyCompactedfiles = null; + try { +Collection compactedfiles = +this.getStoreEngine().getStoreFileManager().getCompactedfiles(); +if (compactedfiles != null && compactedfiles.size() != 0) { + // Do a copy under read lock + copyCompactedfiles = new ArrayList(compactedfiles); +} else { + if (LOG.isTraceEnabled()) { +LOG.trace("No compacted files to archive"); +return; + } } + } finally { +lock.readLock().unlock(); + } + if 
(copyCompactedfiles != null && !copyCompactedfiles.isEmpty()) { +removeCompactedfiles(copyCompactedfiles); } } finally { - lock.readLock().unlock(); -} -if (copyCompactedfiles != null && !copyCompactedfiles.isEmpty()) { - removeCompactedfiles(copyCompactedfiles); + archiveLock.unlock(); } }
hbase git commit: HBASE-16661 Add last major compaction age to per-region metrics
Repository: hbase Updated Branches: refs/heads/branch-1.3 387a08657 -> 3e32164e4 HBASE-16661 Add last major compaction age to per-region metrics Signed-off-by: Gary HelmlingProject: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/3e32164e Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/3e32164e Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/3e32164e Branch: refs/heads/branch-1.3 Commit: 3e32164e44245b4d9e2ffadcb4ae233184f0f9ea Parents: 387a086 Author: Dustin Pho Authored: Sat Sep 24 14:58:37 2016 -0700 Committer: Gary Helmling Committed: Mon Oct 10 15:26:47 2016 -0700 -- .../hbase/regionserver/MetricsRegionSource.java| 2 ++ .../hbase/regionserver/MetricsRegionWrapper.java | 5 + .../regionserver/MetricsRegionSourceImpl.java | 4 .../regionserver/TestMetricsRegionSourceImpl.java | 5 + .../apache/hadoop/hbase/regionserver/HRegion.java | 9 +++-- .../regionserver/MetricsRegionWrapperImpl.java | 17 + .../apache/hadoop/hbase/regionserver/Region.java | 4 ++-- .../regionserver/MetricsRegionWrapperStub.java | 5 + 8 files changed, 43 insertions(+), 8 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/3e32164e/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSource.java -- diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSource.java index 911c757..12ef07c 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSource.java @@ -29,10 +29,12 @@ public interface MetricsRegionSource extends Comparable { String SIZE_VALUE_NAME = "size"; String COMPACTIONS_COMPLETED_COUNT = "compactionsCompletedCount"; String COMPACTIONS_FAILED_COUNT = "compactionsFailedCount"; + String 
LAST_MAJOR_COMPACTION_AGE = "lastMajorCompactionAge"; String NUM_BYTES_COMPACTED_COUNT = "numBytesCompactedCount"; String NUM_FILES_COMPACTED_COUNT = "numFilesCompactedCount"; String COMPACTIONS_COMPLETED_DESC = "Number of compactions that have completed."; String COMPACTIONS_FAILED_DESC = "Number of compactions that have failed."; + String LAST_MAJOR_COMPACTION_DESC = "Age of the last major compaction in milliseconds."; String NUM_BYTES_COMPACTED_DESC = "Sum of filesize on all files entering a finished, successful or aborted, compaction"; String NUM_FILES_COMPACTED_DESC = http://git-wip-us.apache.org/repos/asf/hbase/blob/3e32164e/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapper.java -- diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapper.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapper.java index 0482d2a..9b7acd3 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapper.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapper.java @@ -101,6 +101,11 @@ public interface MetricsRegionWrapper { long getNumCompactionsCompleted(); /** + * @return Age of the last major compaction + */ + long getLastMajorCompactionAge(); + + /** * Returns the total number of compactions that have been reported as failed on this region. * Note that a given compaction can be reported as both completed and failed if an exception * is thrown in the processing after {@code HRegion.compact()}. 
http://git-wip-us.apache.org/repos/asf/hbase/blob/3e32164e/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.java -- diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.java index ae579cf..16f36bb 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.java @@ -269,6 +269,10 @@ public class MetricsRegionSourceImpl implements MetricsRegionSource { MetricsRegionSource.COMPACTIONS_FAILED_DESC),
hbase git commit: HBASE-16661 Add last major compaction age to per-region metrics
Repository: hbase Updated Branches: refs/heads/branch-1 66038b8c1 -> 59ca4dad7 HBASE-16661 Add last major compaction age to per-region metrics Signed-off-by: Gary HelmlingProject: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/59ca4dad Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/59ca4dad Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/59ca4dad Branch: refs/heads/branch-1 Commit: 59ca4dad70cee46314c992766fd9303d1e41ee2c Parents: 66038b8 Author: Dustin Pho Authored: Sat Sep 24 14:58:37 2016 -0700 Committer: Gary Helmling Committed: Mon Oct 10 15:21:53 2016 -0700 -- .../hbase/regionserver/MetricsRegionSource.java| 2 ++ .../hbase/regionserver/MetricsRegionWrapper.java | 5 + .../regionserver/MetricsRegionSourceImpl.java | 4 .../regionserver/TestMetricsRegionSourceImpl.java | 5 + .../apache/hadoop/hbase/regionserver/HRegion.java | 9 +++-- .../regionserver/MetricsRegionWrapperImpl.java | 17 + .../apache/hadoop/hbase/regionserver/Region.java | 4 ++-- .../regionserver/MetricsRegionWrapperStub.java | 5 + 8 files changed, 43 insertions(+), 8 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/59ca4dad/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSource.java -- diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSource.java index 911c757..12ef07c 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSource.java @@ -29,10 +29,12 @@ public interface MetricsRegionSource extends Comparable { String SIZE_VALUE_NAME = "size"; String COMPACTIONS_COMPLETED_COUNT = "compactionsCompletedCount"; String COMPACTIONS_FAILED_COUNT = "compactionsFailedCount"; + String 
LAST_MAJOR_COMPACTION_AGE = "lastMajorCompactionAge"; String NUM_BYTES_COMPACTED_COUNT = "numBytesCompactedCount"; String NUM_FILES_COMPACTED_COUNT = "numFilesCompactedCount"; String COMPACTIONS_COMPLETED_DESC = "Number of compactions that have completed."; String COMPACTIONS_FAILED_DESC = "Number of compactions that have failed."; + String LAST_MAJOR_COMPACTION_DESC = "Age of the last major compaction in milliseconds."; String NUM_BYTES_COMPACTED_DESC = "Sum of filesize on all files entering a finished, successful or aborted, compaction"; String NUM_FILES_COMPACTED_DESC = http://git-wip-us.apache.org/repos/asf/hbase/blob/59ca4dad/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapper.java -- diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapper.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapper.java index 0482d2a..9b7acd3 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapper.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapper.java @@ -101,6 +101,11 @@ public interface MetricsRegionWrapper { long getNumCompactionsCompleted(); /** + * @return Age of the last major compaction + */ + long getLastMajorCompactionAge(); + + /** * Returns the total number of compactions that have been reported as failed on this region. * Note that a given compaction can be reported as both completed and failed if an exception * is thrown in the processing after {@code HRegion.compact()}. 
http://git-wip-us.apache.org/repos/asf/hbase/blob/59ca4dad/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.java -- diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.java index ae579cf..16f36bb 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.java @@ -269,6 +269,10 @@ public class MetricsRegionSourceImpl implements MetricsRegionSource { MetricsRegionSource.COMPACTIONS_FAILED_DESC),
hbase git commit: HBASE-16661 Add last major compaction age to per-region metrics
Repository: hbase Updated Branches: refs/heads/master 341f049e7 -> fcef2c02c HBASE-16661 Add last major compaction age to per-region metrics Signed-off-by: Gary HelmlingProject: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/fcef2c02 Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/fcef2c02 Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/fcef2c02 Branch: refs/heads/master Commit: fcef2c02c99a0fd62135aa86e95a03edf1fef3ce Parents: 341f049 Author: Dustin Pho Authored: Sat Sep 24 14:58:37 2016 -0700 Committer: Gary Helmling Committed: Mon Oct 10 15:16:12 2016 -0700 -- .../hbase/regionserver/MetricsRegionSource.java| 2 ++ .../hbase/regionserver/MetricsRegionWrapper.java | 5 + .../regionserver/MetricsRegionSourceImpl.java | 4 .../regionserver/TestMetricsRegionSourceImpl.java | 5 + .../apache/hadoop/hbase/regionserver/HRegion.java | 9 +++-- .../regionserver/MetricsRegionWrapperImpl.java | 17 + .../apache/hadoop/hbase/regionserver/Region.java | 4 ++-- .../regionserver/MetricsRegionWrapperStub.java | 5 + 8 files changed, 43 insertions(+), 8 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/fcef2c02/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSource.java -- diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSource.java index 911c757..12ef07c 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSource.java @@ -29,10 +29,12 @@ public interface MetricsRegionSource extends Comparable { String SIZE_VALUE_NAME = "size"; String COMPACTIONS_COMPLETED_COUNT = "compactionsCompletedCount"; String COMPACTIONS_FAILED_COUNT = "compactionsFailedCount"; + String 
LAST_MAJOR_COMPACTION_AGE = "lastMajorCompactionAge"; String NUM_BYTES_COMPACTED_COUNT = "numBytesCompactedCount"; String NUM_FILES_COMPACTED_COUNT = "numFilesCompactedCount"; String COMPACTIONS_COMPLETED_DESC = "Number of compactions that have completed."; String COMPACTIONS_FAILED_DESC = "Number of compactions that have failed."; + String LAST_MAJOR_COMPACTION_DESC = "Age of the last major compaction in milliseconds."; String NUM_BYTES_COMPACTED_DESC = "Sum of filesize on all files entering a finished, successful or aborted, compaction"; String NUM_FILES_COMPACTED_DESC = http://git-wip-us.apache.org/repos/asf/hbase/blob/fcef2c02/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapper.java -- diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapper.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapper.java index e3fd5c3..cfc0742 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapper.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapper.java @@ -106,6 +106,11 @@ public interface MetricsRegionWrapper { long getNumCompactionsCompleted(); /** + * @return Age of the last major compaction + */ + long getLastMajorCompactionAge(); + + /** * Returns the total number of compactions that have been reported as failed on this region. * Note that a given compaction can be reported as both completed and failed if an exception * is thrown in the processing after {@code HRegion.compact()}. 
http://git-wip-us.apache.org/repos/asf/hbase/blob/fcef2c02/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.java -- diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.java index c0d71d5..24064ad 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.java @@ -269,6 +269,10 @@ public class MetricsRegionSourceImpl implements MetricsRegionSource { MetricsRegionSource.COMPACTIONS_FAILED_DESC),
[3/5] hbase git commit: HBASE-16699 Overflows in AverageIntervalRateLimiter's refill() and getWaitInterval()
HBASE-16699 Overflows in AverageIntervalRateLimiter's refill() and getWaitInterval() Signed-off-by: Matteo BertozziProject: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/387a0865 Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/387a0865 Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/387a0865 Branch: refs/heads/branch-1.3 Commit: 387a08657c06e350e3d912e98486c8ae3a1fe025 Parents: 6a15986 Author: Huaxiang Sun Authored: Mon Oct 10 14:12:03 2016 -0700 Committer: Matteo Bertozzi Committed: Mon Oct 10 14:17:44 2016 -0700 -- .../quotas/AverageIntervalRateLimiter.java | 20 ++- .../hbase/quotas/DefaultOperationQuota.java | 47 +++ .../hadoop/hbase/quotas/NoopOperationQuota.java | 5 - .../hadoop/hbase/quotas/NoopQuotaLimiter.java | 9 -- .../hadoop/hbase/quotas/OperationQuota.java | 59 + .../hadoop/hbase/quotas/QuotaLimiter.java | 9 -- .../apache/hadoop/hbase/quotas/RateLimiter.java | 38 +- .../hadoop/hbase/quotas/TimeBasedLimiter.java | 13 -- .../hadoop/hbase/quotas/TestRateLimiter.java| 130 +++ 9 files changed, 196 insertions(+), 134 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/387a0865/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/AverageIntervalRateLimiter.java -- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/AverageIntervalRateLimiter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/AverageIntervalRateLimiter.java index 75e6aea..9320d7c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/AverageIntervalRateLimiter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/AverageIntervalRateLimiter.java @@ -34,12 +34,21 @@ public class AverageIntervalRateLimiter extends RateLimiter { return limit; } -long delta = (limit * (now - nextRefillTime)) / super.getTimeUnitInMillis(); +long timeInterval = now - nextRefillTime; +long delta = 0; +long timeUnitInMillis = super.getTimeUnitInMillis(); +if 
(timeInterval >= timeUnitInMillis) { + delta = limit; +} else if (timeInterval > 0) { + double r = ((double)timeInterval / (double)timeUnitInMillis) * limit; + delta = (long)r; +} + if (delta > 0) { this.nextRefillTime = now; - return Math.min(limit, delta); } -return 0; + +return delta; } @Override @@ -47,8 +56,9 @@ public class AverageIntervalRateLimiter extends RateLimiter { if (nextRefillTime == -1) { return 0; } -long timeUnitInMillis = super.getTimeUnitInMillis(); -return ((amount * timeUnitInMillis) / limit) - ((available * timeUnitInMillis) / limit); + +double r = ((double)(amount - available)) * super.getTimeUnitInMillis() / limit; +return (long)r; } // This method is for strictly testing purpose only http://git-wip-us.apache.org/repos/asf/hbase/blob/387a0865/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/DefaultOperationQuota.java -- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/DefaultOperationQuota.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/DefaultOperationQuota.java index 654e8fa..6caac74 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/DefaultOperationQuota.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/DefaultOperationQuota.java @@ -31,8 +31,7 @@ public class DefaultOperationQuota implements OperationQuota { private long readAvailable = 0; private long writeConsumed = 0; private long readConsumed = 0; - - private AvgOperationSize avgOpSize = new AvgOperationSize(); + private final long[] operationSize; public DefaultOperationQuota(final QuotaLimiter... 
limiters) { this(Arrays.asList(limiters)); @@ -43,6 +42,12 @@ public class DefaultOperationQuota implements OperationQuota { */ public DefaultOperationQuota(final List limiters) { this.limiters = limiters; +int size = OperationType.values().length; +operationSize = new long[size]; + +for (int i = 0; i < size; ++i) { + operationSize[i] = 0; +} } @Override @@ -68,22 +73,12 @@ public class DefaultOperationQuota implements OperationQuota { @Override public void close() { -// Calculate and set the average size of get, scan and mutate for the current operation -long getSize = avgOpSize.getAvgOperationSize(OperationType.GET); -long scanSize = avgOpSize.getAvgOperationSize(OperationType.SCAN); -long mutationSize =
[5/5] hbase git commit: HBASE-16699 Overflows in AverageIntervalRateLimiter's refill() and getWaitInterval()
HBASE-16699 Overflows in AverageIntervalRateLimiter's refill() and getWaitInterval() Signed-off-by: Matteo Bertozzi Project: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/47c8659d Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/47c8659d Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/47c8659d Branch: refs/heads/branch-1.1 Commit: 47c8659d2ac6fd5bb715e30e317aa44ee574d8b0 Parents: 9620dc4 Author: Huaxiang Sun Authored: Mon Oct 10 14:12:03 2016 -0700 Committer: Matteo Bertozzi Committed: Mon Oct 10 14:29:11 2016 -0700 -- .../quotas/AverageIntervalRateLimiter.java | 20 ++- .../hbase/quotas/DefaultOperationQuota.java | 47 +++ .../hadoop/hbase/quotas/NoopOperationQuota.java | 5 - .../hadoop/hbase/quotas/NoopQuotaLimiter.java | 9 -- .../hadoop/hbase/quotas/OperationQuota.java | 59 + .../hadoop/hbase/quotas/QuotaLimiter.java | 9 -- .../apache/hadoop/hbase/quotas/RateLimiter.java | 38 +- .../hadoop/hbase/quotas/TimeBasedLimiter.java | 13 -- .../hadoop/hbase/quotas/TestRateLimiter.java| 130 +++ 9 files changed, 196 insertions(+), 134 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/47c8659d/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/AverageIntervalRateLimiter.java -- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/AverageIntervalRateLimiter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/AverageIntervalRateLimiter.java index 75e6aea..9320d7c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/AverageIntervalRateLimiter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/AverageIntervalRateLimiter.java @@ -34,12 +34,21 @@ public class AverageIntervalRateLimiter extends RateLimiter { return limit; } -long delta = (limit * (now - nextRefillTime)) / super.getTimeUnitInMillis(); +long timeInterval = now - nextRefillTime; +long delta = 0; +long timeUnitInMillis = super.getTimeUnitInMillis(); +if 
(timeInterval >= timeUnitInMillis) { + delta = limit; +} else if (timeInterval > 0) { + double r = ((double)timeInterval / (double)timeUnitInMillis) * limit; + delta = (long)r; +} + if (delta > 0) { this.nextRefillTime = now; - return Math.min(limit, delta); } -return 0; + +return delta; } @Override @@ -47,8 +56,9 @@ public class AverageIntervalRateLimiter extends RateLimiter { if (nextRefillTime == -1) { return 0; } -long timeUnitInMillis = super.getTimeUnitInMillis(); -return ((amount * timeUnitInMillis) / limit) - ((available * timeUnitInMillis) / limit); + +double r = ((double)(amount - available)) * super.getTimeUnitInMillis() / limit; +return (long)r; } // This method is for strictly testing purpose only http://git-wip-us.apache.org/repos/asf/hbase/blob/47c8659d/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/DefaultOperationQuota.java -- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/DefaultOperationQuota.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/DefaultOperationQuota.java index 654e8fa..6caac74 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/DefaultOperationQuota.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/DefaultOperationQuota.java @@ -31,8 +31,7 @@ public class DefaultOperationQuota implements OperationQuota { private long readAvailable = 0; private long writeConsumed = 0; private long readConsumed = 0; - - private AvgOperationSize avgOpSize = new AvgOperationSize(); + private final long[] operationSize; public DefaultOperationQuota(final QuotaLimiter... 
limiters) { this(Arrays.asList(limiters)); @@ -43,6 +42,12 @@ public class DefaultOperationQuota implements OperationQuota { */ public DefaultOperationQuota(final List limiters) { this.limiters = limiters; +int size = OperationType.values().length; +operationSize = new long[size]; + +for (int i = 0; i < size; ++i) { + operationSize[i] = 0; +} } @Override @@ -68,22 +73,12 @@ public class DefaultOperationQuota implements OperationQuota { @Override public void close() { -// Calculate and set the average size of get, scan and mutate for the current operation -long getSize = avgOpSize.getAvgOperationSize(OperationType.GET); -long scanSize = avgOpSize.getAvgOperationSize(OperationType.SCAN); -long mutationSize =
[1/5] hbase git commit: HBASE-16699 Overflows in AverageIntervalRateLimiter's refill() and getWaitInterval()
Repository: hbase Updated Branches: refs/heads/branch-1 acb1392b1 -> 66038b8c1 refs/heads/branch-1.1 9620dc4e7 -> 47c8659d2 refs/heads/branch-1.2 bd38f8dbf -> 55e989090 refs/heads/branch-1.3 6a1598674 -> 387a08657 refs/heads/master c930bc92f -> 341f049e7 HBASE-16699 Overflows in AverageIntervalRateLimiter's refill() and getWaitInterval() Signed-off-by: Matteo Bertozzi Project: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/341f049e Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/341f049e Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/341f049e Branch: refs/heads/master Commit: 341f049e7734be0677ce38018dcb125b991469e7 Parents: c930bc9 Author: Huaxiang Sun Authored: Mon Oct 10 14:06:28 2016 -0700 Committer: Matteo Bertozzi Committed: Mon Oct 10 14:07:00 2016 -0700 -- .../quotas/AverageIntervalRateLimiter.java | 20 ++- .../hbase/quotas/DefaultOperationQuota.java | 46 ++- .../hadoop/hbase/quotas/NoopOperationQuota.java | 5 - .../hadoop/hbase/quotas/NoopQuotaLimiter.java | 9 -- .../hadoop/hbase/quotas/OperationQuota.java | 53 .../hadoop/hbase/quotas/QuotaLimiter.java | 9 -- .../apache/hadoop/hbase/quotas/RateLimiter.java | 38 +- .../hadoop/hbase/quotas/TimeBasedLimiter.java | 13 -- .../hadoop/hbase/quotas/TestRateLimiter.java| 130 +++ 9 files changed, 191 insertions(+), 132 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/341f049e/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/AverageIntervalRateLimiter.java -- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/AverageIntervalRateLimiter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/AverageIntervalRateLimiter.java index 75e6aea..9320d7c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/AverageIntervalRateLimiter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/AverageIntervalRateLimiter.java @@ -34,12 +34,21 @@ public class 
AverageIntervalRateLimiter extends RateLimiter { return limit; } -long delta = (limit * (now - nextRefillTime)) / super.getTimeUnitInMillis(); +long timeInterval = now - nextRefillTime; +long delta = 0; +long timeUnitInMillis = super.getTimeUnitInMillis(); +if (timeInterval >= timeUnitInMillis) { + delta = limit; +} else if (timeInterval > 0) { + double r = ((double)timeInterval / (double)timeUnitInMillis) * limit; + delta = (long)r; +} + if (delta > 0) { this.nextRefillTime = now; - return Math.min(limit, delta); } -return 0; + +return delta; } @Override @@ -47,8 +56,9 @@ public class AverageIntervalRateLimiter extends RateLimiter { if (nextRefillTime == -1) { return 0; } -long timeUnitInMillis = super.getTimeUnitInMillis(); -return ((amount * timeUnitInMillis) / limit) - ((available * timeUnitInMillis) / limit); + +double r = ((double)(amount - available)) * super.getTimeUnitInMillis() / limit; +return (long)r; } // This method is for strictly testing purpose only http://git-wip-us.apache.org/repos/asf/hbase/blob/341f049e/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/DefaultOperationQuota.java -- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/DefaultOperationQuota.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/DefaultOperationQuota.java index 34c749e..9291286 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/DefaultOperationQuota.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/DefaultOperationQuota.java @@ -27,8 +27,6 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.hbase.client.Mutation; import org.apache.hadoop.hbase.client.Result; -import org.apache.hadoop.hbase.quotas.OperationQuota.AvgOperationSize; -import org.apache.hadoop.hbase.quotas.OperationQuota.OperationType; @InterfaceAudience.Private @InterfaceStability.Evolving @@ -40,8 +38,7 @@ public class 
DefaultOperationQuota implements OperationQuota { private long readAvailable = 0; private long writeConsumed = 0; private long readConsumed = 0; - - private AvgOperationSize avgOpSize = new AvgOperationSize(); + private final long[] operationSize; public DefaultOperationQuota(final QuotaLimiter... limiters) { this(Arrays.asList(limiters)); @@ -52,6 +49,12 @@ public class
[4/5] hbase git commit: HBASE-16699 Overflows in AverageIntervalRateLimiter's refill() and getWaitInterval()
HBASE-16699 Overflows in AverageIntervalRateLimiter's refill() and getWaitInterval() Signed-off-by: Matteo Bertozzi Project: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/55e98909 Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/55e98909 Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/55e98909 Branch: refs/heads/branch-1.2 Commit: 55e989090df034110dfdde4b7803c07ff92b8321 Parents: bd38f8d Author: Huaxiang Sun Authored: Mon Oct 10 14:12:03 2016 -0700 Committer: Matteo Bertozzi Committed: Mon Oct 10 14:22:18 2016 -0700 -- .../quotas/AverageIntervalRateLimiter.java | 20 ++- .../hbase/quotas/DefaultOperationQuota.java | 47 +++ .../hadoop/hbase/quotas/NoopOperationQuota.java | 5 - .../hadoop/hbase/quotas/NoopQuotaLimiter.java | 9 -- .../hadoop/hbase/quotas/OperationQuota.java | 59 + .../hadoop/hbase/quotas/QuotaLimiter.java | 9 -- .../apache/hadoop/hbase/quotas/RateLimiter.java | 38 +- .../hadoop/hbase/quotas/TimeBasedLimiter.java | 13 -- .../hadoop/hbase/quotas/TestRateLimiter.java| 130 +++ 9 files changed, 196 insertions(+), 134 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/55e98909/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/AverageIntervalRateLimiter.java -- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/AverageIntervalRateLimiter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/AverageIntervalRateLimiter.java index 75e6aea..9320d7c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/AverageIntervalRateLimiter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/AverageIntervalRateLimiter.java @@ -34,12 +34,21 @@ public class AverageIntervalRateLimiter extends RateLimiter { return limit; } -long delta = (limit * (now - nextRefillTime)) / super.getTimeUnitInMillis(); +long timeInterval = now - nextRefillTime; +long delta = 0; +long timeUnitInMillis = super.getTimeUnitInMillis(); +if 
(timeInterval >= timeUnitInMillis) { + delta = limit; +} else if (timeInterval > 0) { + double r = ((double)timeInterval / (double)timeUnitInMillis) * limit; + delta = (long)r; +} + if (delta > 0) { this.nextRefillTime = now; - return Math.min(limit, delta); } -return 0; + +return delta; } @Override @@ -47,8 +56,9 @@ public class AverageIntervalRateLimiter extends RateLimiter { if (nextRefillTime == -1) { return 0; } -long timeUnitInMillis = super.getTimeUnitInMillis(); -return ((amount * timeUnitInMillis) / limit) - ((available * timeUnitInMillis) / limit); + +double r = ((double)(amount - available)) * super.getTimeUnitInMillis() / limit; +return (long)r; } // This method is for strictly testing purpose only http://git-wip-us.apache.org/repos/asf/hbase/blob/55e98909/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/DefaultOperationQuota.java -- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/DefaultOperationQuota.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/DefaultOperationQuota.java index 654e8fa..6caac74 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/DefaultOperationQuota.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/DefaultOperationQuota.java @@ -31,8 +31,7 @@ public class DefaultOperationQuota implements OperationQuota { private long readAvailable = 0; private long writeConsumed = 0; private long readConsumed = 0; - - private AvgOperationSize avgOpSize = new AvgOperationSize(); + private final long[] operationSize; public DefaultOperationQuota(final QuotaLimiter... 
limiters) { this(Arrays.asList(limiters)); @@ -43,6 +42,12 @@ public class DefaultOperationQuota implements OperationQuota { */ public DefaultOperationQuota(final List limiters) { this.limiters = limiters; +int size = OperationType.values().length; +operationSize = new long[size]; + +for (int i = 0; i < size; ++i) { + operationSize[i] = 0; +} } @Override @@ -68,22 +73,12 @@ public class DefaultOperationQuota implements OperationQuota { @Override public void close() { -// Calculate and set the average size of get, scan and mutate for the current operation -long getSize = avgOpSize.getAvgOperationSize(OperationType.GET); -long scanSize = avgOpSize.getAvgOperationSize(OperationType.SCAN); -long mutationSize =
[2/2] hbase git commit: HBASE-16767 Mob compaction needs to clean up temporary files in face of IOExceptions.
HBASE-16767 Mob compaction needs to clean up temporary files in face of IOExceptions. 1). Fix resource leak issue upon exception during mob compaction. 2). Reorg the code in compactMobFilesInBatch() to make it more readable. Signed-off-by: Jonathan M Hsieh Project: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/c930bc92 Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/c930bc92 Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/c930bc92 Branch: refs/heads/master Commit: c930bc92f4f2beac4fe1f9a4f6364648317e1eee Parents: 932a196 Author: Huaxiang Sun Authored: Fri Oct 7 15:47:06 2016 -0700 Committer: Sean Busbey Committed: Mon Oct 10 14:48:24 2016 -0500 -- .../compactions/PartitionedMobCompactor.java| 157 +++ .../TestPartitionedMobCompactor.java| 90 ++- 2 files changed, 178 insertions(+), 69 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/c930bc92/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/PartitionedMobCompactor.java -- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/PartitionedMobCompactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/PartitionedMobCompactor.java index 29b7e8a..33aecc0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/PartitionedMobCompactor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/PartitionedMobCompactor.java @@ -229,8 +229,8 @@ public class PartitionedMobCompactor extends MobCompactor { } // archive the del files if all the mob files are selected. 
if (request.type == CompactionType.ALL_FILES && !newDelPaths.isEmpty()) { - LOG.info("After a mob compaction with all files selected, archiving the del files " -+ newDelPaths); + LOG.info( + "After a mob compaction with all files selected, archiving the del files " + newDelPaths); try { MobUtils.removeMobFiles(conf, fs, tableName, mobTableDir, column.getName(), newDelFiles); } catch (IOException e) { @@ -381,7 +381,7 @@ public class PartitionedMobCompactor extends MobCompactor { List filesToCompact, int batch, Path bulkloadPathOfPartition, Path bulkloadColumnPath, List newFiles) -throws IOException { + throws IOException { // open scanner to the selected mob files and del files. StoreScanner scanner = createScanner(filesToCompact, ScanType.COMPACT_DROP_DELETES); // the mob files to be compacted, not include the del files. @@ -392,62 +392,92 @@ public class PartitionedMobCompactor extends MobCompactor { StoreFileWriter writer = null; StoreFileWriter refFileWriter = null; Path filePath = null; -Path refFilePath = null; long mobCells = 0; +boolean cleanupTmpMobFile = false; +boolean cleanupBulkloadDirOfPartition = false; +boolean cleanupCommittedMobFile = false; +boolean closeReaders= true; + try { - writer = MobUtils.createWriter(conf, fs, column, partition.getPartitionId().getDate(), -tempPath, Long.MAX_VALUE, column.getCompactionCompressionType(), partition.getPartitionId() - .getStartKey(), compactionCacheConfig, cryptoContext); - filePath = writer.getPath(); - byte[] fileName = Bytes.toBytes(filePath.getName()); - // create a temp file and open a writer for it in the bulkloadPath - refFileWriter = MobUtils.createRefFileWriter(conf, fs, column, bulkloadColumnPath, fileInfo -.getSecond().longValue(), compactionCacheConfig, cryptoContext); - refFilePath = refFileWriter.getPath(); - List cells = new ArrayList<>(); - boolean hasMore; - ScannerContext scannerContext = - ScannerContext.newBuilder().setBatchLimit(compactionKVMax).build(); - do { -hasMore = 
scanner.next(cells, scannerContext); -for (Cell cell : cells) { - // write the mob cell to the mob file. - writer.append(cell); - // write the new reference cell to the store file. - KeyValue reference = MobUtils.createMobRefKeyValue(cell, fileName, tableNameTag); - refFileWriter.append(reference); - mobCells++; + try { +writer = MobUtils +.createWriter(conf, fs, column, partition.getPartitionId().getDate(), tempPath, +Long.MAX_VALUE, column.getCompactionCompressionType(), +partition.getPartitionId().getStartKey(), compactionCacheConfig, cryptoContext); +
[1/2] hbase git commit: Revert "1). Fix resource leak issue upon exception during mob compaction. 2). Reorg the code in compactMobFilesInBatch() to make it more readable."
Repository: hbase Updated Branches: refs/heads/master 3c35a722d -> c930bc92f Revert "1). Fix resource leak issue upon exception during mob compaction. 2). Reorg the code in compactMobFilesInBatch() to make it more readable." This reverts commit c7cae6be3dccfaa63033b705ea9845f3f088aab6. Missing JIRA ID Project: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/932a1964 Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/932a1964 Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/932a1964 Branch: refs/heads/master Commit: 932a1964bf578f953a4fd85a3b58d74185680785 Parents: 3c35a72 Author: Sean Busbey Authored: Mon Oct 10 14:47:46 2016 -0500 Committer: Sean Busbey Committed: Mon Oct 10 14:47:46 2016 -0500 -- .../compactions/PartitionedMobCompactor.java| 157 --- .../TestPartitionedMobCompactor.java| 90 +-- 2 files changed, 69 insertions(+), 178 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/932a1964/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/PartitionedMobCompactor.java -- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/PartitionedMobCompactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/PartitionedMobCompactor.java index 33aecc0..29b7e8a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/PartitionedMobCompactor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/PartitionedMobCompactor.java @@ -229,8 +229,8 @@ public class PartitionedMobCompactor extends MobCompactor { } // archive the del files if all the mob files are selected. 
if (request.type == CompactionType.ALL_FILES && !newDelPaths.isEmpty()) { - LOG.info( - "After a mob compaction with all files selected, archiving the del files " + newDelPaths); + LOG.info("After a mob compaction with all files selected, archiving the del files " ++ newDelPaths); try { MobUtils.removeMobFiles(conf, fs, tableName, mobTableDir, column.getName(), newDelFiles); } catch (IOException e) { @@ -381,7 +381,7 @@ public class PartitionedMobCompactor extends MobCompactor { List filesToCompact, int batch, Path bulkloadPathOfPartition, Path bulkloadColumnPath, List newFiles) - throws IOException { +throws IOException { // open scanner to the selected mob files and del files. StoreScanner scanner = createScanner(filesToCompact, ScanType.COMPACT_DROP_DELETES); // the mob files to be compacted, not include the del files. @@ -392,92 +392,62 @@ public class PartitionedMobCompactor extends MobCompactor { StoreFileWriter writer = null; StoreFileWriter refFileWriter = null; Path filePath = null; +Path refFilePath = null; long mobCells = 0; -boolean cleanupTmpMobFile = false; -boolean cleanupBulkloadDirOfPartition = false; -boolean cleanupCommittedMobFile = false; -boolean closeReaders= true; - try { - try { -writer = MobUtils -.createWriter(conf, fs, column, partition.getPartitionId().getDate(), tempPath, -Long.MAX_VALUE, column.getCompactionCompressionType(), -partition.getPartitionId().getStartKey(), compactionCacheConfig, cryptoContext); -cleanupTmpMobFile = true; -filePath = writer.getPath(); -byte[] fileName = Bytes.toBytes(filePath.getName()); -// create a temp file and open a writer for it in the bulkloadPath -refFileWriter = MobUtils.createRefFileWriter(conf, fs, column, bulkloadColumnPath, -fileInfo.getSecond().longValue(), compactionCacheConfig, cryptoContext); -cleanupBulkloadDirOfPartition = true; -List cells = new ArrayList<>(); -boolean hasMore; -ScannerContext scannerContext = -ScannerContext.newBuilder().setBatchLimit(compactionKVMax).build(); -do { - 
hasMore = scanner.next(cells, scannerContext); - for (Cell cell : cells) { -// write the mob cell to the mob file. -writer.append(cell); -// write the new reference cell to the store file. -KeyValue reference = MobUtils.createMobRefKeyValue(cell, fileName, tableNameTag); -refFileWriter.append(reference); -mobCells++; - } - cells.clear(); -} while (hasMore); - } finally { -// close the scanner. -scanner.close(); - -if (cleanupTmpMobFile) { -
[3/9] hbase git commit: HBASE-16781 Fix flaky TestMasterProcedureWalLease
HBASE-16781 Fix flaky TestMasterProcedureWalLease Project: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/29d701a3 Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/29d701a3 Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/29d701a3 Branch: refs/heads/hbase-12439 Commit: 29d701a314b6bf56771a217b42c4c10832b15753 Parents: c7cae6b Author: Matteo Bertozzi Authored: Fri Oct 7 17:32:19 2016 -0700 Committer: Matteo Bertozzi Committed: Fri Oct 7 18:01:53 2016 -0700 -- .../procedure2/store/wal/WALProcedureStore.java | 41 +--- .../hadoop/hbase/master/MasterServices.java | 5 +++ .../master/procedure/MasterProcedureEnv.java| 7 +++- .../hbase/master/MockNoopMasterServices.java| 5 +++ .../MasterProcedureTestingUtility.java | 1 + 5 files changed, 43 insertions(+), 16 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/29d701a3/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java -- diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java index 36cf7af..1e60402 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java @@ -122,6 +122,7 @@ public class WALProcedureStore extends ProcedureStoreBase { private final AtomicBoolean inSync = new AtomicBoolean(false); private final AtomicLong totalSynced = new AtomicLong(0); private final AtomicLong lastRollTs = new AtomicLong(0); + private final AtomicLong syncId = new AtomicLong(0); private LinkedTransferQueue slotsCache = null; private Set corruptedLogs = null; @@ -226,15 +227,15 @@ public class WALProcedureStore extends ProcedureStoreBase { } @Override - public void stop(boolean abort) { + public void 
stop(final boolean abort) { if (!setRunning(false)) { return; } -LOG.info("Stopping the WAL Procedure Store"); +LOG.info("Stopping the WAL Procedure Store, isAbort=" + abort + + (isSyncAborted() ? " (self aborting)" : "")); sendStopSignal(); - -if (!abort) { +if (!isSyncAborted()) { try { while (syncThread.isAlive()) { sendStopSignal(); @@ -525,6 +526,7 @@ public class WALProcedureStore extends ProcedureStoreBase { } } + final long pushSyncId = syncId.get(); updateStoreTracker(type, procId, subProcIds); slots[slotIndex++] = slot; logId = flushLogId; @@ -540,7 +542,9 @@ public class WALProcedureStore extends ProcedureStoreBase { slotCond.signal(); } - syncCond.await(); + while (pushSyncId == syncId.get() && isRunning()) { +syncCond.await(); + } } catch (InterruptedException e) { Thread.currentThread().interrupt(); sendAbortProcessSignal(); @@ -642,13 +646,15 @@ public class WALProcedureStore extends ProcedureStoreBase { totalSyncedToStore = totalSynced.addAndGet(slotSize); slotIndex = 0; inSync.set(false); + syncId.incrementAndGet(); } catch (InterruptedException e) { Thread.currentThread().interrupt(); - sendAbortProcessSignal(); syncException.compareAndSet(null, e); + sendAbortProcessSignal(); throw e; } catch (Throwable t) { syncException.compareAndSet(null, t); + sendAbortProcessSignal(); throw t; } finally { syncCond.signalAll(); @@ -679,13 +685,12 @@ public class WALProcedureStore extends ProcedureStoreBase { } catch (Throwable e) { LOG.warn("unable to sync slots, retry=" + retry); if (++retry >= maxRetriesBeforeRoll) { - if (logRolled >= maxSyncFailureRoll) { + if (logRolled >= maxSyncFailureRoll && isRunning()) { LOG.error("Sync slots after log roll failed, abort.", e); -sendAbortProcessSignal(); throw e; } - if (!rollWriterOrDie()) { + if (!rollWriterWithRetries()) { throw e; } @@ -720,8 +725,8 @@ public class WALProcedureStore extends ProcedureStoreBase { return totalSynced; } - private boolean rollWriterOrDie() { -for (int i = 0; i < rollRetries; ++i) { + 
private boolean rollWriterWithRetries() { +for (int i = 0; i < rollRetries && isRunning(); ++i) { if (i > 0)
[6/9] hbase git commit: HBASE-16771 VerifyReplication should increase GOODROWS counter if re-comparison passes
HBASE-16771 VerifyReplication should increase GOODROWS counter if re-comparison passes Project: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/ccde4393 Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/ccde4393 Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/ccde4393 Branch: refs/heads/hbase-12439 Commit: ccde4393925a6fd5f97a11068cdc96fa4e4d4ac0 Parents: 8a8c608 Author: tedyuAuthored: Sun Oct 9 20:48:28 2016 -0700 Committer: tedyu Committed: Sun Oct 9 20:48:28 2016 -0700 -- .../hbase/mapreduce/replication/VerifyReplication.java | 12 1 file changed, 8 insertions(+), 4 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/ccde4393/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java -- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java index 0273b91..88bf815 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java @@ -107,6 +107,7 @@ public class VerifyReplication extends Configured implements Tool { private ResultScanner replicatedScanner; private Result currentCompareRowInPeerTable; private int sleepMsBeforeReCompare; +private String delimiter = ""; private boolean verbose = false; /** @@ -124,6 +125,7 @@ public class VerifyReplication extends Configured implements Tool { if (replicatedScanner == null) { Configuration conf = context.getConfiguration(); sleepMsBeforeReCompare = conf.getInt(NAME +".sleepMsBeforeReCompare", 0); +delimiter = conf.get(NAME + ".delimiter", ""); verbose = conf.getBoolean(NAME +".verbose", false); final Scan scan = new Scan(); scan.setBatch(batch); @@ -180,7 +182,6 @@ public class VerifyReplication extends 
Configured implements Tool { } } catch (Exception e) { logFailRowAndIncreaseCounter(context, Counters.CONTENT_DIFFERENT_ROWS, value); -LOG.error("Exception while comparing row : " + e); } currentCompareRowInPeerTable = replicatedScanner.next(); break; @@ -204,9 +205,11 @@ public class VerifyReplication extends Configured implements Tool { Result sourceResult = sourceTable.get(new Get(row.getRow())); Result replicatedResult = replicatedTable.get(new Get(row.getRow())); Result.compareResults(sourceResult, replicatedResult); - context.getCounter(Counters.GOODROWS).increment(1); - if (verbose) { -LOG.info("Good row key: " + delimiter + Bytes.toString(row.getRow()) + delimiter); + if (!sourceResult.isEmpty()) { +context.getCounter(Counters.GOODROWS).increment(1); +if (verbose) { + LOG.info("Good row key: " + delimiter + Bytes.toString(row.getRow()) + delimiter); +} } return; } catch (Exception e) { @@ -320,6 +323,7 @@ public class VerifyReplication extends Configured implements Tool { conf.setLong(NAME+".startTime", startTime); conf.setLong(NAME+".endTime", endTime); conf.setInt(NAME +".sleepMsBeforeReCompare", sleepMsBeforeReCompare); +conf.set(NAME + ".delimiter", delimiter); conf.setBoolean(NAME +".verbose", verbose); if (families != null) { conf.set(NAME+".families", families);
[8/9] hbase git commit: HBASE-16701 rely on test category timeout instead of defining one on a specific test.
HBASE-16701 rely on test category timeout instead of defining one on a specific test. Signed-off-by: Umesh AgasheSigned-off-by: Yu Li Project: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/6b6a8018 Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/6b6a8018 Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/6b6a8018 Branch: refs/heads/hbase-12439 Commit: 6b6a80187693ebcecfb774af51a3e2c875223cda Parents: f5abe17 Author: Sean Busbey Authored: Wed Oct 5 17:23:20 2016 -0500 Committer: Sean Busbey Committed: Mon Oct 10 00:14:38 2016 -0500 -- .../java/org/apache/hadoop/hbase/regionserver/TestHRegion.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/6b6a8018/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java -- diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java index a69c0ee..612d6cf 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java @@ -6731,7 +6731,7 @@ public class TestHRegion { * HBASE-16429 Make sure no stuck if roll writer when ring buffer is filled with appends * @throws IOException if IO error occurred during test */ - @Test(timeout = 6) + @Test public void testWritesWhileRollWriter() throws IOException { int testCount = 10; int numRows = 1024;
[5/9] hbase git commit: HBASE-16794 TestDispatchMergingRegionsProcedure#testMergeRegionsConcurrently is flaky
HBASE-16794 TestDispatchMergingRegionsProcedure#testMergeRegionsConcurrently is flaky Signed-off-by: Matteo BertozziProject: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/8a8c6088 Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/8a8c6088 Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/8a8c6088 Branch: refs/heads/hbase-12439 Commit: 8a8c60889cf67b581d7adb4245e0bcc02cdfdc93 Parents: e06c367 Author: ChiaPing Tsai Authored: Sun Oct 9 16:52:54 2016 -0700 Committer: Matteo Bertozzi Committed: Sun Oct 9 16:53:29 2016 -0700 -- .../hbase/regionserver/CompactSplitThread.java | 5 +++ .../TestDispatchMergingRegionsProcedure.java| 45 +--- 2 files changed, 44 insertions(+), 6 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/8a8c6088/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java -- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java index c1f82b9..a454f0e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java @@ -724,6 +724,11 @@ public class CompactSplitThread implements CompactionRequestor, PropagatingConfi } @VisibleForTesting + public long getCompletedMergeTaskCount() { +return mergePool.getCompletedTaskCount(); + } + + @VisibleForTesting /** * Shutdown the long compaction thread pool. 
* Should only be used in unit test to prevent long compaction thread pool from stealing job http://git-wip-us.apache.org/repos/asf/hbase/blob/8a8c6088/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDispatchMergingRegionsProcedure.java -- diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDispatchMergingRegionsProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDispatchMergingRegionsProcedure.java index 601f22f..a7dd4a8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDispatchMergingRegionsProcedure.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDispatchMergingRegionsProcedure.java @@ -18,7 +18,9 @@ package org.apache.hadoop.hbase.master.procedure; +import java.io.IOException; import java.util.List; +import java.util.concurrent.TimeUnit; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -36,6 +38,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.D import org.apache.hadoop.hbase.testclassification.MasterTests; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread; import org.junit.After; import org.junit.AfterClass; import org.junit.Before; @@ -125,20 +128,21 @@ public class TestDispatchMergingRegionsProcedure { regionsToMerge[0] = tableRegions.get(0); regionsToMerge[1] = tableRegions.get(1); +final int initCompletedTaskCount = countOfCompletedMergeTaskCount(); long procId = procExec.submitProcedure(new DispatchMergingRegionsProcedure( procExec.getEnvironment(), tableName, regionsToMerge, true)); ProcedureTestingUtility.waitProcedure(procExec, procId); ProcedureTestingUtility.assertProcNotFailed(procExec, procId); -assertRegionCount(tableName, 2); +assertRegionCount(tableName, 2, 1, initCompletedTaskCount); } 
/** * This tests two concurrent region merges */ - @Test(timeout=9) + @Test(timeout=6) public void testMergeRegionsConcurrently() throws Exception { -final TableName tableName = TableName.valueOf("testMergeTwoRegions"); +final TableName tableName = TableName.valueOf("testMergeRegionsConcurrently"); final ProcedureExecutor procExec = getMasterProcedureExecutor(); List tableRegions = createTable(tableName, 4); @@ -150,6 +154,7 @@ public class TestDispatchMergingRegionsProcedure { regionsToMerge2[0] = tableRegions.get(2); regionsToMerge2[1] = tableRegions.get(3); +final int initCompletedTaskCount = countOfCompletedMergeTaskCount(); long procId1 = procExec.submitProcedure(new DispatchMergingRegionsProcedure( procExec.getEnvironment(), tableName, regionsToMerge1, true)); long procId2 =
[4/9] hbase git commit: HBASE-16791 Fix TestDispatchMergingRegionsProcedure
HBASE-16791 Fix TestDispatchMergingRegionsProcedure Project: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/e06c3676 Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/e06c3676 Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/e06c3676 Branch: refs/heads/hbase-12439 Commit: e06c3676f1273f033e3e185ee9c1ec52c1c7cb31 Parents: 29d701a Author: Matteo BertozziAuthored: Sat Oct 8 15:32:34 2016 -0700 Committer: Matteo Bertozzi Committed: Sat Oct 8 15:32:34 2016 -0700 -- .../TestDispatchMergingRegionsProcedure.java| 102 +++ 1 file changed, 39 insertions(+), 63 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/e06c3676/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDispatchMergingRegionsProcedure.java -- diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDispatchMergingRegionsProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDispatchMergingRegionsProcedure.java index 3612341..601f22f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDispatchMergingRegionsProcedure.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDispatchMergingRegionsProcedure.java @@ -64,7 +64,7 @@ public class TestDispatchMergingRegionsProcedure { conf.setInt("hbase.master.maximum.ping.server.attempts", 3); conf.setInt("hbase.master.ping.server.retry.sleep.interval", 1); -conf.setInt(MasterProcedureConstants.MASTER_PROCEDURE_THREADS, 3); +conf.setInt(MasterProcedureConstants.MASTER_PROCEDURE_THREADS, 1); } @BeforeClass @@ -119,18 +119,9 @@ public class TestDispatchMergingRegionsProcedure { final TableName tableName = TableName.valueOf("testMergeTwoRegions"); final ProcedureExecutor procExec = getMasterProcedureExecutor(); -HTableDescriptor desc = new HTableDescriptor(tableName); -desc.addFamily(new HColumnDescriptor(FAMILY)); -byte[][] splitRows = new 
byte[2][]; -splitRows[0] = new byte[]{(byte)'3'}; -splitRows[1] = new byte[]{(byte)'6'}; -admin.createTable(desc, splitRows); - -List tableRegions; -HRegionInfo [] regionsToMerge = new HRegionInfo[2]; +List tableRegions = createTable(tableName, 3); -tableRegions = admin.getTableRegions(tableName); -assertEquals(3, admin.getTableRegions(tableName).size()); +HRegionInfo[] regionsToMerge = new HRegionInfo[2]; regionsToMerge[0] = tableRegions.get(0); regionsToMerge[1] = tableRegions.get(1); @@ -139,7 +130,7 @@ public class TestDispatchMergingRegionsProcedure { ProcedureTestingUtility.waitProcedure(procExec, procId); ProcedureTestingUtility.assertProcNotFailed(procExec, procId); -assertEquals(2, admin.getTableRegions(tableName).size()); +assertRegionCount(tableName, 2); } /** @@ -150,20 +141,10 @@ public class TestDispatchMergingRegionsProcedure { final TableName tableName = TableName.valueOf("testMergeTwoRegions"); final ProcedureExecutor procExec = getMasterProcedureExecutor(); -HTableDescriptor desc = new HTableDescriptor(tableName); -desc.addFamily(new HColumnDescriptor(FAMILY)); -byte[][] splitRows = new byte[3][]; -splitRows[0] = new byte[]{(byte)'2'}; -splitRows[1] = new byte[]{(byte)'4'}; -splitRows[2] = new byte[]{(byte)'6'}; -admin.createTable(desc, splitRows); - -List tableRegions; -HRegionInfo [] regionsToMerge1 = new HRegionInfo[2]; -HRegionInfo [] regionsToMerge2 = new HRegionInfo[2]; +List tableRegions = createTable(tableName, 4); -tableRegions = admin.getTableRegions(tableName); -assertEquals(4, admin.getTableRegions(tableName).size()); +HRegionInfo[] regionsToMerge1 = new HRegionInfo[2]; +HRegionInfo[] regionsToMerge2 = new HRegionInfo[2]; regionsToMerge1[0] = tableRegions.get(0); regionsToMerge1[1] = tableRegions.get(1); regionsToMerge2[0] = tableRegions.get(2); @@ -178,7 +159,7 @@ public class TestDispatchMergingRegionsProcedure { ProcedureTestingUtility.assertProcNotFailed(procExec, procId1); ProcedureTestingUtility.assertProcNotFailed(procExec, 
procId2); -assertEquals(2, admin.getTableRegions(tableName).size()); +assertRegionCount(tableName, 2); } @Test(timeout=6) @@ -186,18 +167,9 @@ public class TestDispatchMergingRegionsProcedure { final TableName tableName = TableName.valueOf("testMergeRegionsTwiceWithSameNonce"); final ProcedureExecutor procExec = getMasterProcedureExecutor(); -HTableDescriptor desc = new HTableDescriptor(tableName); -desc.addFamily(new
[7/9] hbase git commit: HBASE-16666 Add append and remove peer namespaces cmds for replication (Guanghao Zhang)
HBASE-1 Add append and remove peer namespaces cmds for replication (Guanghao Zhang) Project: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/f5abe17b Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/f5abe17b Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/f5abe17b Branch: refs/heads/hbase-12439 Commit: f5abe17bc66ae9b780daec3afb6f08e69a5cf392 Parents: ccde439 Author: tedyuAuthored: Sun Oct 9 21:22:50 2016 -0700 Committer: tedyu Committed: Sun Oct 9 21:22:50 2016 -0700 -- .../src/main/ruby/hbase/replication_admin.rb| 37 hbase-shell/src/main/ruby/shell.rb | 2 + .../shell/commands/append_peer_namespaces.rb| 44 + .../shell/commands/remove_peer_namespaces.rb| 41 + .../test/ruby/hbase/replication_admin_test.rb | 93 +++- 5 files changed, 214 insertions(+), 3 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/f5abe17b/hbase-shell/src/main/ruby/hbase/replication_admin.rb -- diff --git a/hbase-shell/src/main/ruby/hbase/replication_admin.rb b/hbase-shell/src/main/ruby/hbase/replication_admin.rb index f99ccae..8aa158b 100644 --- a/hbase-shell/src/main/ruby/hbase/replication_admin.rb +++ b/hbase-shell/src/main/ruby/hbase/replication_admin.rb @@ -205,10 +205,47 @@ module Hbase end end +# Add some namespaces for the specified peer +def add_peer_namespaces(id, namespaces) + unless namespaces.nil? +rpc = get_peer_config(id) +unless rpc.nil? + ns_set = rpc.getNamespaces() + if ns_set.nil? +ns_set = java.util.HashSet.new + end + namespaces.each do |n| +ns_set.add(n) + end + rpc.setNamespaces(ns_set) + @replication_admin.updatePeerConfig(id, rpc) +end + end +end + +# Remove some namespaces for the specified peer +def remove_peer_namespaces(id, namespaces) + unless namespaces.nil? +rpc = get_peer_config(id) +unless rpc.nil? + ns_set = rpc.getNamespaces() + unless ns_set.nil? 
+namespaces.each do |n| + ns_set.remove(n) +end + end + rpc.setNamespaces(ns_set) + @replication_admin.updatePeerConfig(id, rpc) +end + end +end + # Show the current namespaces config for the specified peer def show_peer_namespaces(peer_config) namespaces = peer_config.get_namespaces if !namespaces.nil? +namespaces = java.util.ArrayList.new(namespaces) +java.util.Collections.sort(namespaces) return namespaces.join(';') else return nil http://git-wip-us.apache.org/repos/asf/hbase/blob/f5abe17b/hbase-shell/src/main/ruby/shell.rb -- diff --git a/hbase-shell/src/main/ruby/shell.rb b/hbase-shell/src/main/ruby/shell.rb index ee508e9..02f8191 100644 --- a/hbase-shell/src/main/ruby/shell.rb +++ b/hbase-shell/src/main/ruby/shell.rb @@ -371,6 +371,8 @@ Shell.load_command_group( enable_peer disable_peer set_peer_namespaces +append_peer_namespaces +remove_peer_namespaces show_peer_tableCFs set_peer_tableCFs list_replicated_tables http://git-wip-us.apache.org/repos/asf/hbase/blob/f5abe17b/hbase-shell/src/main/ruby/shell/commands/append_peer_namespaces.rb -- diff --git a/hbase-shell/src/main/ruby/shell/commands/append_peer_namespaces.rb b/hbase-shell/src/main/ruby/shell/commands/append_peer_namespaces.rb new file mode 100644 index 000..2585754 --- /dev/null +++ b/hbase-shell/src/main/ruby/shell/commands/append_peer_namespaces.rb @@ -0,0 +1,44 @@ +# +# Copyright The Apache Software Foundation +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +module Shell + module Commands +class
[9/9] hbase git commit: Revert "HBASE-16750 hbase compilation failed on power system (Saravanan Krishnamoorthy)"
Revert "HBASE-16750 hbase compilation failed on power system (Saravanan Krishnamoorthy)" This reverts commit d1e40bf0bda4d82ab217e6b715e7c4dd5a6b9af2. Project: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/3c35a722 Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/3c35a722 Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/3c35a722 Branch: refs/heads/hbase-12439 Commit: 3c35a722d9c1c77826d7c86ee204274bfdaae65f Parents: 6b6a801 Author: Dima SpivakAuthored: Mon Oct 10 10:08:41 2016 -0500 Committer: Dima Spivak Committed: Mon Oct 10 10:08:41 2016 -0500 -- pom.xml | 6 ++ 1 file changed, 2 insertions(+), 4 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/3c35a722/pom.xml -- diff --git a/pom.xml b/pom.xml index 7715278..2d341c0 100644 --- a/pom.xml +++ b/pom.xml @@ -1009,7 +1009,7 @@ org.asciidoctor asciidoctorj-pdf -1.5.0-alpha.11 +1.5.0-alpha.6 @@ -1019,8 +1019,6 @@ coderay ${project.version} -${project.build.sourceDirectory} - @@ -1235,7 +1233,7 @@ 1.3.9-1 6.18 2.10.3 -1.5.3 +1.5.2.1 /usr /etc/hbase
[2/9] hbase git commit: 1). Fix resource leak issue upon exception during mob compaction. 2). Reorg the code in compactMobFilesInBatch() to make it more readable.
1). Fix resource leak issue upon exception during mob compaction. 2). Reorg the code in compactMobFilesInBatch() to make it more readable. Signed-off-by: Jonathan M HsiehProject: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/c7cae6be Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/c7cae6be Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/c7cae6be Branch: refs/heads/hbase-12439 Commit: c7cae6be3dccfaa63033b705ea9845f3f088aab6 Parents: 723d561 Author: Huaxiang Sun Authored: Fri Oct 7 15:47:06 2016 -0700 Committer: Jonathan M Hsieh Committed: Fri Oct 7 17:49:27 2016 -0700 -- .../compactions/PartitionedMobCompactor.java| 157 +++ .../TestPartitionedMobCompactor.java| 90 ++- 2 files changed, 178 insertions(+), 69 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/c7cae6be/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/PartitionedMobCompactor.java -- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/PartitionedMobCompactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/PartitionedMobCompactor.java index 29b7e8a..33aecc0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/PartitionedMobCompactor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/PartitionedMobCompactor.java @@ -229,8 +229,8 @@ public class PartitionedMobCompactor extends MobCompactor { } // archive the del files if all the mob files are selected. 
if (request.type == CompactionType.ALL_FILES && !newDelPaths.isEmpty()) { - LOG.info("After a mob compaction with all files selected, archiving the del files " -+ newDelPaths); + LOG.info( + "After a mob compaction with all files selected, archiving the del files " + newDelPaths); try { MobUtils.removeMobFiles(conf, fs, tableName, mobTableDir, column.getName(), newDelFiles); } catch (IOException e) { @@ -381,7 +381,7 @@ public class PartitionedMobCompactor extends MobCompactor { List filesToCompact, int batch, Path bulkloadPathOfPartition, Path bulkloadColumnPath, List newFiles) -throws IOException { + throws IOException { // open scanner to the selected mob files and del files. StoreScanner scanner = createScanner(filesToCompact, ScanType.COMPACT_DROP_DELETES); // the mob files to be compacted, not include the del files. @@ -392,62 +392,92 @@ public class PartitionedMobCompactor extends MobCompactor { StoreFileWriter writer = null; StoreFileWriter refFileWriter = null; Path filePath = null; -Path refFilePath = null; long mobCells = 0; +boolean cleanupTmpMobFile = false; +boolean cleanupBulkloadDirOfPartition = false; +boolean cleanupCommittedMobFile = false; +boolean closeReaders= true; + try { - writer = MobUtils.createWriter(conf, fs, column, partition.getPartitionId().getDate(), -tempPath, Long.MAX_VALUE, column.getCompactionCompressionType(), partition.getPartitionId() - .getStartKey(), compactionCacheConfig, cryptoContext); - filePath = writer.getPath(); - byte[] fileName = Bytes.toBytes(filePath.getName()); - // create a temp file and open a writer for it in the bulkloadPath - refFileWriter = MobUtils.createRefFileWriter(conf, fs, column, bulkloadColumnPath, fileInfo -.getSecond().longValue(), compactionCacheConfig, cryptoContext); - refFilePath = refFileWriter.getPath(); - List cells = new ArrayList<>(); - boolean hasMore; - ScannerContext scannerContext = - ScannerContext.newBuilder().setBatchLimit(compactionKVMax).build(); - do { -hasMore = 
scanner.next(cells, scannerContext); -for (Cell cell : cells) { - // write the mob cell to the mob file. - writer.append(cell); - // write the new reference cell to the store file. - KeyValue reference = MobUtils.createMobRefKeyValue(cell, fileName, tableNameTag); - refFileWriter.append(reference); - mobCells++; + try { +writer = MobUtils +.createWriter(conf, fs, column, partition.getPartitionId().getDate(), tempPath, +Long.MAX_VALUE, column.getCompactionCompressionType(), +partition.getPartitionId().getStartKey(), compactionCacheConfig, cryptoContext); +cleanupTmpMobFile = true; +filePath = writer.getPath(); +byte[]
hbase git commit: Revert "HBASE-16750 hbase compilation failed on power system (Saravanan Krishnamoorthy)"
Repository: hbase Updated Branches: refs/heads/master 6b6a80187 -> 3c35a722d Revert "HBASE-16750 hbase compilation failed on power system (Saravanan Krishnamoorthy)" This reverts commit d1e40bf0bda4d82ab217e6b715e7c4dd5a6b9af2. Project: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/3c35a722 Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/3c35a722 Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/3c35a722 Branch: refs/heads/master Commit: 3c35a722d9c1c77826d7c86ee204274bfdaae65f Parents: 6b6a801 Author: Dima SpivakAuthored: Mon Oct 10 10:08:41 2016 -0500 Committer: Dima Spivak Committed: Mon Oct 10 10:08:41 2016 -0500 -- pom.xml | 6 ++ 1 file changed, 2 insertions(+), 4 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/3c35a722/pom.xml -- diff --git a/pom.xml b/pom.xml index 7715278..2d341c0 100644 --- a/pom.xml +++ b/pom.xml @@ -1009,7 +1009,7 @@ org.asciidoctor asciidoctorj-pdf -1.5.0-alpha.11 +1.5.0-alpha.6 @@ -1019,8 +1019,6 @@ coderay ${project.version} -${project.build.sourceDirectory} - @@ -1235,7 +1233,7 @@ 1.3.9-1 6.18 2.10.3 -1.5.3 +1.5.2.1 /usr /etc/hbase
svn commit: r16435 - /dev/hbase/hbase-1.2.4RC0/
Author: busbey Date: Mon Oct 10 06:11:10 2016 New Revision: 16435 Log: stage HBase 1.2.4 RC0 Added: dev/hbase/hbase-1.2.4RC0/ dev/hbase/hbase-1.2.4RC0/1.2.3_1.2.4RC0_compat_report.html dev/hbase/hbase-1.2.4RC0/hbase-1.2.4-bin.tar.gz (with props) dev/hbase/hbase-1.2.4RC0/hbase-1.2.4-bin.tar.gz.asc dev/hbase/hbase-1.2.4RC0/hbase-1.2.4-bin.tar.gz.md5 dev/hbase/hbase-1.2.4RC0/hbase-1.2.4-bin.tar.gz.mds dev/hbase/hbase-1.2.4RC0/hbase-1.2.4-bin.tar.gz.sha dev/hbase/hbase-1.2.4RC0/hbase-1.2.4-src.tar.gz (with props) dev/hbase/hbase-1.2.4RC0/hbase-1.2.4-src.tar.gz.asc dev/hbase/hbase-1.2.4RC0/hbase-1.2.4-src.tar.gz.md5 dev/hbase/hbase-1.2.4RC0/hbase-1.2.4-src.tar.gz.mds dev/hbase/hbase-1.2.4RC0/hbase-1.2.4-src.tar.gz.sha Added: dev/hbase/hbase-1.2.4RC0/1.2.3_1.2.4RC0_compat_report.html == --- dev/hbase/hbase-1.2.4RC0/1.2.3_1.2.4RC0_compat_report.html (added) +++ dev/hbase/hbase-1.2.4RC0/1.2.3_1.2.4RC0_compat_report.html Mon Oct 10 06:11:10 2016 @@ -0,0 +1,635 @@ + + +http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd;> +http://www.w3.org/1999/xhtml; xml:lang="en" lang="en"> + + + + +HBase: rel/1.2.3/bd63744 to 1.2.4RC0/04bd0ec compatibility report + + +body { +font-family:Arial, sans-serif; +background-color:White; +color:Black; +} +hr { +color:Black; +background-color:Black; +height:1px; +border:0; +} +h1 { +margin-bottom:0px; +padding-bottom:0px; +font-size:1.625em; +} +h2 { +margin-bottom:0px; +padding-bottom:0px; +font-size:1.25em; +white-space:nowrap; +} +div.symbols { +color:#003E69; +} +div.symbols i { +color:Brown; +} +span.section { +font-weight:bold; +cursor:pointer; +color:#003E69; +white-space:nowrap; +margin-left:5px; +} +span:hover.section { +color:#336699; +} +span.sect_aff { +cursor:pointer; +margin-left:7px; +padding-left:15px; +font-size:0.875em; +color:#cc3300; +} +span.ext { +font-weight:100; +} +span.jar { +color:#cc3300; +font-size:0.875em; +font-weight:bold; +} +div.jar_list { +padding-left:5px; +font-size:0.94em; +} +span.pkg_t { 
+color:#408080; +font-size:0.875em; +} +span.pkg { +color:#408080; +font-size:0.875em; +font-weight:bold; +} +span.cname { +color:Green; +font-size:0.875em; +font-weight:bold; +} +span.iname_b { +font-weight:bold; +font-size:1.1em; +} +span.iname_a { +color:#33; +font-weight:bold; +font-size:0.94em; +} +span.sym_p { +font-weight:normal; +white-space:normal; +} +span.sym_p span { +white-space:nowrap; +} +span.attr { +color:Black; +font-weight:100; +} +span.deprecated { +color:Red; +font-weight:bold; +font-family:Monaco, monospace; +} +div.affect { +padding-left:15px; +padding-bottom:10px; +font-size:0.87em; +font-style:italic; +line-height:0.75em; +} +div.affected { +padding-left:30px; +padding-top:10px; +} +table.ptable { +border-collapse:collapse; +border:1px outset black; +line-height:1em; +margin-left:15px; +margin-top:3px; +margin-bottom:3px; +width:900px; +} +table.ptable td { +border:1px solid Gray; +padding: 3px; +font-size:0.875em; +text-align:left; +vertical-align:top; +} +table.ptable th { +background-color:#ee; +font-weight:bold; +color:#33; +font-family:Verdana, Arial; +font-size:0.875em; +border:1px solid Gray; +text-align:center; +vertical-align:top; +white-space:nowrap; +padding: 3px; +} +table.summary { +border-collapse:collapse; +border:1px outset black; +} +table.summary th { +background-color:#ee; +font-weight:100; +text-align:left; +font-size:0.94em; +white-space:nowrap; +border:1px inset Gray; +padding: 3px; +} +table.summary td { +text-align:right; +white-space:nowrap; +border:1px inset Gray; +padding: 3px 5px 3px 10px; +} +span.mngl { +padding-left:15px; +font-size:0.875em; +cursor:text; +color:#44; +} +span.color_p { +font-style:italic; +color:Brown; +} +span.param { +font-style:italic; +} +span.focus_p { +font-style:italic; +background-color:#DCDCDC; +} +span.nowrap { +white-space:nowrap; +} +.passed { +background-color:#CCFFCC; +font-weight:100; +} +.warning { +background-color:#F4F4AF; +font-weight:100; +} +.failed { 
+background-color:#FF; +font-weight:100; +} +.new { +background-color:#C6DEFF; +font-weight:100; +} + +.compatible { +background-color:#CCFFCC; +font-weight:100; +} +.almost_compatible { +background-color:#FFDAA3; +font-weight:100; +} +.incompatible { +background-color:#FF; +font-weight:100; +} +.gray { +background-color:#DCDCDC; +font-weight:100; +} + +.top_ref { +