hbase git commit: HBASE-19135 TestWeakObjectPool time out
Repository: hbase
Updated Branches:
  refs/heads/master 9382f391c -> d7cf88947


HBASE-19135 TestWeakObjectPool time out

Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/d7cf8894
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/d7cf8894
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/d7cf8894

Branch: refs/heads/master
Commit: d7cf88947cca1e88340f1a613b464f7ccdbd80e6
Parents: 9382f39
Author: Michael Stack
Authored: Tue Oct 31 09:38:38 2017 -0700
Committer: Michael Stack
Committed: Tue Oct 31 16:17:31 2017 -0700

----------------------------------------------------------------------
 .../java/org/apache/hadoop/hbase/util/TestWeakObjectPool.java | 7 ++-
 1 file changed, 6 insertions(+), 1 deletion(-)
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hbase/blob/d7cf8894/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestWeakObjectPool.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestWeakObjectPool.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestWeakObjectPool.java
index 9dbbbd0..12d9877 100644
--- a/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestWeakObjectPool.java
+++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestWeakObjectPool.java
@@ -22,15 +22,20 @@ import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicReference;
 
+import org.apache.hadoop.hbase.CategoryBasedTimeout;
 import org.apache.hadoop.hbase.testclassification.MiscTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.junit.Assert;
 import org.junit.Before;
+import org.junit.Rule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
+import org.junit.rules.TestRule;
 
 @Category({MiscTests.class, SmallTests.class})
 public class TestWeakObjectPool {
+  @Rule public final TestRule timeout = CategoryBasedTimeout.builder().withTimeout(this.getClass()).
+      withLookingForStuckThread(true).build();
   ObjectPool pool;
 
   @Before
@@ -89,7 +94,7 @@ public class TestWeakObjectPool {
     Assert.assertNotEquals(hash1, System.identityHashCode(obj3));
   }
 
-  @Test(timeout=1000)
+  @Test
   public void testCongestion() throws Exception {
     final int THREAD_COUNT = 100;
hbase git commit: HBASE-19135 TestWeakObjectPool time out
Repository: hbase
Updated Branches:
  refs/heads/branch-2 314759652 -> 426ab1e93


HBASE-19135 TestWeakObjectPool time out

Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/426ab1e9
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/426ab1e9
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/426ab1e9

Branch: refs/heads/branch-2
Commit: 426ab1e9313cab5b8afe35ba11220421c32b9902
Parents: 3147596
Author: Michael Stack
Authored: Tue Oct 31 09:38:38 2017 -0700
Committer: Michael Stack
Committed: Tue Oct 31 16:16:56 2017 -0700

----------------------------------------------------------------------
 .../java/org/apache/hadoop/hbase/util/TestWeakObjectPool.java | 7 ++-
 1 file changed, 6 insertions(+), 1 deletion(-)
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hbase/blob/426ab1e9/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestWeakObjectPool.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestWeakObjectPool.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestWeakObjectPool.java
index 9dbbbd0..12d9877 100644
--- a/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestWeakObjectPool.java
+++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestWeakObjectPool.java
@@ -22,15 +22,20 @@ import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicReference;
 
+import org.apache.hadoop.hbase.CategoryBasedTimeout;
 import org.apache.hadoop.hbase.testclassification.MiscTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.junit.Assert;
 import org.junit.Before;
+import org.junit.Rule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
+import org.junit.rules.TestRule;
 
 @Category({MiscTests.class, SmallTests.class})
 public class TestWeakObjectPool {
+  @Rule public final TestRule timeout = CategoryBasedTimeout.builder().withTimeout(this.getClass()).
+      withLookingForStuckThread(true).build();
   ObjectPool pool;
 
   @Before
@@ -89,7 +94,7 @@ public class TestWeakObjectPool {
     Assert.assertNotEquals(hash1, System.identityHashCode(obj3));
   }
 
-  @Test(timeout=1000)
+  @Test
   public void testCongestion() throws Exception {
     final int THREAD_COUNT = 100;
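For readers following along: CategoryBasedTimeout is an HBase test utility that wraps JUnit's Timeout rule and derives the limit from the test class's category (SmallTests, MediumTests, ...), so brittle per-method values like the 1000ms timeout dropped above are no longer needed. Below is a minimal sketch of the same idea using only stock JUnit 4.12; the class name and the 60-second figure are illustrative assumptions, not HBase code:

import java.util.concurrent.TimeUnit;

import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TestRule;
import org.junit.rules.Timeout;

public class TimeoutRuleSketch {
  // One class-level rule applies to every @Test method, replacing per-method timeouts.
  @Rule
  public final TestRule timeout = Timeout.builder()
      .withTimeout(60, TimeUnit.SECONDS)
      // On timeout, also report the stack of the thread that appears stuck.
      .withLookingForStuckThread(true)
      .build();

  @Test // no per-method timeout needed any more
  public void testSomethingSlow() throws Exception {
    Thread.sleep(100);
  }
}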
[1/2] hbase git commit: HBASE-19138 Rare failure in TestLruBlockCache
Repository: hbase
Updated Branches:
  refs/heads/branch-2 16012f93a -> 314759652
  refs/heads/master 8237fdbd1 -> 9382f391c


HBASE-19138 Rare failure in TestLruBlockCache

Wait for the block count to drop to zero after awaiting shutdown of the executor pool

Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/31475965
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/31475965
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/31475965

Branch: refs/heads/branch-2
Commit: 314759652f5e41ed149314ab94b8731058d67b73
Parents: 16012f9
Author: Andrew Purtell
Authored: Tue Oct 31 11:35:48 2017 -0700
Committer: Andrew Purtell
Committed: Tue Oct 31 13:40:55 2017 -0700

----------------------------------------------------------------------
 .../hadoop/hbase/io/hfile/TestLruBlockCache.java | 16
 1 file changed, 16 insertions(+)
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hbase/blob/31475965/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java
index b4dfc0c..4eec0bf 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java
@@ -32,6 +32,10 @@ import java.util.concurrent.atomic.AtomicInteger;
 
 import org.apache.hadoop.hbase.testclassification.IOTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.Waiter;
+import org.apache.hadoop.hbase.Waiter.ExplainingPredicate;
 import org.apache.hadoop.hbase.io.HeapSize;
 import org.apache.hadoop.hbase.io.hfile.LruBlockCache.EvictionThread;
 import org.apache.hadoop.hbase.util.ClassSize;
@@ -56,6 +60,7 @@ public class TestLruBlockCache {
     final long blockSize = calculateBlockSizeDefault(maxSize, numBlocks);
     assertTrue("calculateBlockSize appears broken.", blockSize * numBlocks <= maxSize);
 
+    final Configuration conf = HBaseConfiguration.create();
     final LruBlockCache cache = new LruBlockCache(maxSize, blockSize);
     EvictionThread evictionThread = cache.getEvictionThread();
     assertTrue(evictionThread != null);
@@ -84,6 +89,17 @@ public class TestLruBlockCache {
     service.shutdown();
     // The test may fail here if the evict thread frees the blocks too fast
     service.awaitTermination(10, TimeUnit.MINUTES);
+    Waiter.waitFor(conf, 1, 100, new ExplainingPredicate() {
+      @Override
+      public boolean evaluate() throws Exception {
+        return cache.getBlockCount() == 0;
+      }
+
+      @Override
+      public String explainFailure() throws Exception {
+        return "Cache block count failed to return to 0";
+      }
+    });
     assertEquals(0, cache.getBlockCount());
     assertEquals(cache.getOverhead(), cache.getCurrentSize());
   }
[2/2] hbase git commit: HBASE-19138 Rare failure in TestLruBlockCache
HBASE-19138 Rare failure in TestLruBlockCache

Wait for the block count to drop to zero after awaiting shutdown of the executor pool

Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/0ac5b337
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/0ac5b337
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/0ac5b337

Branch: refs/heads/branch-1
Commit: 0ac5b337474a96a2e7a73fd2ab22bd25e852df12
Parents: d099263
Author: Andrew Purtell
Authored: Tue Oct 31 11:35:48 2017 -0700
Committer: Andrew Purtell
Committed: Tue Oct 31 11:36:19 2017 -0700

----------------------------------------------------------------------
 .../hadoop/hbase/io/hfile/TestLruBlockCache.java | 16
 1 file changed, 16 insertions(+)
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hbase/blob/0ac5b337/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java
index 305e16c..92408f8 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java
@@ -31,6 +31,10 @@ import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicInteger;
 
 import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.Waiter;
+import org.apache.hadoop.hbase.Waiter.ExplainingPredicate;
 import org.apache.hadoop.hbase.io.HeapSize;
 import org.apache.hadoop.hbase.io.hfile.LruBlockCache.EvictionThread;
 import org.apache.hadoop.hbase.util.ClassSize;
@@ -55,6 +59,7 @@ public class TestLruBlockCache {
     final long blockSize = calculateBlockSizeDefault(maxSize, numBlocks);
     assertTrue("calculateBlockSize appears broken.", blockSize * numBlocks <= maxSize);
 
+    final Configuration conf = HBaseConfiguration.create();
     final LruBlockCache cache = new LruBlockCache(maxSize, blockSize);
     EvictionThread evictionThread = cache.getEvictionThread();
     assertTrue(evictionThread != null);
@@ -83,6 +88,17 @@ public class TestLruBlockCache {
     service.shutdown();
     // The test may fail here if the evict thread frees the blocks too fast
     service.awaitTermination(10, TimeUnit.MINUTES);
+    Waiter.waitFor(conf, 1, 100, new ExplainingPredicate() {
+      @Override
+      public boolean evaluate() throws Exception {
+        return cache.getBlockCount() == 0;
+      }
+
+      @Override
+      public String explainFailure() throws Exception {
+        return "Cache block count failed to return to 0";
+      }
+    });
     assertEquals(0, cache.getBlockCount());
     assertEquals(cache.getOverhead(), cache.getCurrentSize());
   }
[2/2] hbase git commit: HBASE-19138 Rare failure in TestLruBlockCache
HBASE-19138 Rare failure in TestLruBlockCache

Wait for the block count to drop to zero after awaiting shutdown of the executor pool

Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/9382f391
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/9382f391
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/9382f391

Branch: refs/heads/master
Commit: 9382f391c8ce44cc4de22ab5b10a1f4e8046dff7
Parents: 8237fdb
Author: Andrew Purtell
Authored: Tue Oct 31 11:35:48 2017 -0700
Committer: Andrew Purtell
Committed: Tue Oct 31 13:40:56 2017 -0700

----------------------------------------------------------------------
 .../hadoop/hbase/io/hfile/TestLruBlockCache.java | 16
 1 file changed, 16 insertions(+)
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hbase/blob/9382f391/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java
index b4dfc0c..4eec0bf 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java
@@ -32,6 +32,10 @@ import java.util.concurrent.atomic.AtomicInteger;
 
 import org.apache.hadoop.hbase.testclassification.IOTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.Waiter;
+import org.apache.hadoop.hbase.Waiter.ExplainingPredicate;
 import org.apache.hadoop.hbase.io.HeapSize;
 import org.apache.hadoop.hbase.io.hfile.LruBlockCache.EvictionThread;
 import org.apache.hadoop.hbase.util.ClassSize;
@@ -56,6 +60,7 @@ public class TestLruBlockCache {
     final long blockSize = calculateBlockSizeDefault(maxSize, numBlocks);
     assertTrue("calculateBlockSize appears broken.", blockSize * numBlocks <= maxSize);
 
+    final Configuration conf = HBaseConfiguration.create();
     final LruBlockCache cache = new LruBlockCache(maxSize, blockSize);
     EvictionThread evictionThread = cache.getEvictionThread();
     assertTrue(evictionThread != null);
@@ -84,6 +89,17 @@ public class TestLruBlockCache {
     service.shutdown();
     // The test may fail here if the evict thread frees the blocks too fast
     service.awaitTermination(10, TimeUnit.MINUTES);
+    Waiter.waitFor(conf, 1, 100, new ExplainingPredicate() {
+      @Override
+      public boolean evaluate() throws Exception {
+        return cache.getBlockCount() == 0;
+      }
+
+      @Override
+      public String explainFailure() throws Exception {
+        return "Cache block count failed to return to 0";
+      }
+    });
     assertEquals(0, cache.getBlockCount());
     assertEquals(cache.getOverhead(), cache.getCurrentSize());
   }
[1/2] hbase git commit: HBASE-19138 Rare failure in TestLruBlockCache
Repository: hbase
Updated Branches:
  refs/heads/branch-1 d099263e1 -> 0ac5b3374
  refs/heads/branch-1.4 7390763da -> 5af2c1a3a


HBASE-19138 Rare failure in TestLruBlockCache

Wait for the block count to drop to zero after awaiting shutdown of the executor pool

Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/5af2c1a3
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/5af2c1a3
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/5af2c1a3

Branch: refs/heads/branch-1.4
Commit: 5af2c1a3aea69a7a9a9b061f5cc6297918894aa5
Parents: 7390763
Author: Andrew Purtell
Authored: Tue Oct 31 11:35:48 2017 -0700
Committer: Andrew Purtell
Committed: Tue Oct 31 11:35:48 2017 -0700

----------------------------------------------------------------------
 .../hadoop/hbase/io/hfile/TestLruBlockCache.java | 16
 1 file changed, 16 insertions(+)
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hbase/blob/5af2c1a3/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java
index 305e16c..92408f8 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java
@@ -31,6 +31,10 @@ import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicInteger;
 
 import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.Waiter;
+import org.apache.hadoop.hbase.Waiter.ExplainingPredicate;
 import org.apache.hadoop.hbase.io.HeapSize;
 import org.apache.hadoop.hbase.io.hfile.LruBlockCache.EvictionThread;
 import org.apache.hadoop.hbase.util.ClassSize;
@@ -55,6 +59,7 @@ public class TestLruBlockCache {
     final long blockSize = calculateBlockSizeDefault(maxSize, numBlocks);
     assertTrue("calculateBlockSize appears broken.", blockSize * numBlocks <= maxSize);
 
+    final Configuration conf = HBaseConfiguration.create();
     final LruBlockCache cache = new LruBlockCache(maxSize, blockSize);
     EvictionThread evictionThread = cache.getEvictionThread();
     assertTrue(evictionThread != null);
@@ -83,6 +88,17 @@ public class TestLruBlockCache {
     service.shutdown();
     // The test may fail here if the evict thread frees the blocks too fast
     service.awaitTermination(10, TimeUnit.MINUTES);
+    Waiter.waitFor(conf, 1, 100, new ExplainingPredicate() {
+      @Override
+      public boolean evaluate() throws Exception {
+        return cache.getBlockCount() == 0;
+      }
+
+      @Override
+      public String explainFailure() throws Exception {
+        return "Cache block count failed to return to 0";
+      }
+    });
     assertEquals(0, cache.getBlockCount());
     assertEquals(cache.getOverhead(), cache.getCurrentSize());
   }
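All four branches pick up the same fix: rather than asserting cache.getBlockCount() == 0 immediately after executor shutdown and racing the eviction thread, the test now polls until the count drains (or a deadline passes) via Waiter.waitFor with an ExplainingPredicate that can say why a failed wait failed. A minimal, self-contained sketch of that poll-until-condition pattern follows; the names and timeouts are illustrative, and the real HBase helper additionally scales its timeout by Configuration:

import java.util.concurrent.TimeUnit;
import java.util.function.BooleanSupplier;

public final class WaitUtil {
  // Polls the condition every intervalMs until it holds or timeoutMs elapses.
  static boolean waitFor(long timeoutMs, long intervalMs, BooleanSupplier condition)
      throws InterruptedException {
    long deadline = System.nanoTime() + TimeUnit.MILLISECONDS.toNanos(timeoutMs);
    while (System.nanoTime() < deadline) {
      if (condition.getAsBoolean()) {
        return true; // condition reached before the deadline
      }
      Thread.sleep(intervalMs);
    }
    return condition.getAsBoolean(); // one final check at the deadline
  }

  public static void main(String[] args) throws InterruptedException {
    long start = System.currentTimeMillis();
    // Stands in for "cache.getBlockCount() == 0": becomes true after ~200ms.
    boolean ok = waitFor(1000, 50, () -> System.currentTimeMillis() - start > 200);
    System.out.println("condition met: " + ok);
  }
}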
hbase git commit: HBASE-19137 Nightly test should make junit reports optional rather than attempt archive after reporting.
Repository: hbase
Updated Branches:
  refs/heads/HBASE-19030 [deleted] ebc3975cb
  refs/heads/HBASE-19137 [created] 30e5ed0f2


HBASE-19137 Nightly test should make junit reports optional rather than attempt archive after reporting.

Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/30e5ed0f
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/30e5ed0f
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/30e5ed0f

Branch: refs/heads/HBASE-19137
Commit: 30e5ed0f2787ba7cba89c8c047cda470a4bf4627
Parents: e0a530e
Author: Sean Busbey
Authored: Mon Oct 30 09:59:36 2017 -0500
Committer: Sean Busbey
Committed: Tue Oct 31 15:08:07 2017 -0500

----------------------------------------------------------------------
 dev-support/Jenkinsfile | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hbase/blob/30e5ed0f/dev-support/Jenkinsfile
----------------------------------------------------------------------
diff --git a/dev-support/Jenkinsfile b/dev-support/Jenkinsfile
index a8de038..d7faeb6 100644
--- a/dev-support/Jenkinsfile
+++ b/dev-support/Jenkinsfile
@@ -169,6 +169,7 @@ curl -L -o personality.sh "${env.PROJET_PERSONALITY}"
       }
       post {
         always {
+          junit testResults: 'output-jdk7/**/target/**/TEST-*.xml', allowEmptyResults: true
           // zip surefire reports.
           sh '''#!/bin/bash -e
             if [ -d "${OUTPUTDIR}/archiver" ]; then
@@ -186,7 +187,6 @@ curl -L -o personality.sh "${env.PROJET_PERSONALITY}"
           // env variables don't work in archive? or this has to be relative to WORKSPACE. :(
          archive 'output-jdk7/*'
          archive 'output-jdk7/**/*'
-          junit 'output-jdk7/**/target/**/TEST-*.xml'
          publishHTML target: [
            allowMissing: true,
            keepAll: true,
@@ -219,6 +219,7 @@ curl -L -o personality.sh "${env.PROJET_PERSONALITY}"
       }
       post {
         always {
+          junit testResults: 'output-jdk8/**/target/**/TEST-*.xml', allowEmptyResults: true
           // zip surefire reports.
           sh '''#!/bin/bash -e
             if [ -d "${OUTPUTDIR}/archiver" ]; then
@@ -236,7 +237,6 @@ curl -L -o personality.sh "${env.PROJET_PERSONALITY}"
           // env variables don't work in archive? or this has to be relative to WORKSPACE. :(
          archive 'output-jdk8/*'
          archive 'output-jdk8/**/*'
-          junit 'output-jdk8/**/target/**/TEST-*.xml'
          publishHTML target: [
            allowMissing: true,
            keepAll: true,
[hbase] Git Push Summary
Repository: hbase
Updated Tags:
  refs/tags/2.0.0-alpha4RC0 [created] 4bebb7ced
[1/3] hbase git commit: HBASE-18770 Remove bypass method in ObserverContext and implement the 'bypass' logic case by case
Repository: hbase
Updated Branches:
  refs/heads/branch-2 9dfd77595 -> 16012f93a


http://git-wip-us.apache.org/repos/asf/hbase/blob/16012f93/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/compactions/TestMobCompactor.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/compactions/TestMobCompactor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/compactions/TestMobCompactor.java
index e0d9fa2..3f4633a 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/compactions/TestMobCompactor.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/compactions/TestMobCompactor.java
@@ -714,6 +714,9 @@ public class TestMobCompactor {
     while (fileList.length != num) {
       Thread.sleep(50);
       fileList = fs.listStatus(path);
+      for (FileStatus fileStatus: fileList) {
+        LOG.info(fileStatus);
+      }
     }
   }
 
@@ -738,6 +741,7 @@ public class TestMobCompactor {
           candidates.remove(0);
         }
         c.bypass();
+        c.complete();
       }
     }
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/16012f93/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
index 268b352..421bd3f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
@@ -359,7 +359,7 @@ public class TestHRegion {
   /**
    * Create a WAL outside of the usual helper in
-   * {@link HBaseTestingUtility#createWal(Configuration, Path, HRegionInfo)} because that method
+   * {@link HBaseTestingUtility#createWal(Configuration, Path, RegionInfo)} because that method
    * doesn't play nicely with FaultyFileSystem. Call this method before overriding
    * {@code fs.file.impl}.
    * @param callingMethod a unique component for the path, probably the name of the test method.
@@ -2386,6 +2386,9 @@ public class TestHRegion {
     FileSystem fs = FileSystem.get(CONF);
     Path rootDir = new Path(dir + "testDataInMemoryWithoutWAL");
     FSHLog hLog = new FSHLog(fs, rootDir, "testDataInMemoryWithoutWAL", CONF);
+    // This chunk creation is done throughout the code base. Do we want to move it into core?
+    // It is missing from this test. W/o it we NPE.
+    ChunkCreator.initialize(MemStoreLABImpl.CHUNK_SIZE_DEFAULT, false, 0, 0, 0, null);
     HRegion region = initHRegion(tableName, null, null, false, Durability.SYNC_WAL, hLog,
         COLUMN_FAMILY_BYTES);
@@ -2433,17 +2436,17 @@ public class TestHRegion {
     // save normalCPHost and replaced by mockedCPHost
     RegionCoprocessorHost normalCPHost = region.getCoprocessorHost();
     RegionCoprocessorHost mockedCPHost = Mockito.mock(RegionCoprocessorHost.class);
-    Answer answer = new Answer() {
+    // Because the preBatchMutate returns void, we can't do usual Mockito when...then form. Must
+    // do below format (from Mockito doc).
+    Mockito.doAnswer(new Answer() {
       @Override
-      public Boolean answer(InvocationOnMock invocation) throws Throwable {
+      public Object answer(InvocationOnMock invocation) throws Throwable {
         MiniBatchOperationInProgress mb = invocation.getArgumentAt(0,
             MiniBatchOperationInProgress.class);
         mb.addOperationsFromCP(0, new Mutation[]{addPut});
-        return false;
+        return null;
       }
-    };
-    when(mockedCPHost.preBatchMutate(Mockito.isA(MiniBatchOperationInProgress.class)))
-      .then(answer);
+    }).when(mockedCPHost).preBatchMutate(Mockito.isA(MiniBatchOperationInProgress.class));
     region.setCoprocessorHost(mockedCPHost);
     region.put(originalPut);
     region.setCoprocessorHost(normalCPHost);
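The TestHRegion hunk just above is a textbook Mockito migration: preBatchMutate now returns void, and a void call cannot be passed as an argument to when(...), so the stubbing is inverted into doAnswer(...).when(mock).method(...). A standalone sketch of the idiom against the Mockito 2.x API (the diff itself still uses the older 1.x getArgumentAt); the Service interface and all names below are invented for illustration, not HBase types:

import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.isA;
import static org.mockito.Mockito.mock;

import java.util.ArrayList;
import java.util.List;

public class DoAnswerSketch {
  // A void-returning collaborator, like RegionCoprocessorHost#preBatchMutate.
  interface Service {
    void process(List<String> batch);
  }

  public static void main(String[] args) {
    Service mocked = mock(Service.class);
    // doAnswer lets the stub mutate its argument, just as the test's Answer
    // does via MiniBatchOperationInProgress#addOperationsFromCP.
    doAnswer(invocation -> {
      List<String> batch = invocation.getArgument(0);
      batch.add("injected-by-stub");
      return null; // void methods must answer null
    }).when(mocked).process(isA(List.class));

    List<String> input = new ArrayList<>();
    input.add("original");
    mocked.process(input);
    System.out.println(input); // prints [original, injected-by-stub]
  }
}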
[2/3] hbase git commit: HBASE-18770 Remove bypass method in ObserverContext and implement the 'bypass' logic case by case
http://git-wip-us.apache.org/repos/asf/hbase/blob/16012f93/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
index 0623b2c..517ac3f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
@@ -680,7 +680,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
       } else {
         // convert duplicate append to get
         List results = region.get(ProtobufUtil.toGet(mutation, cellScanner), false,
-            nonceGroup, nonce);
+          nonceGroup, nonce);
         r = Result.create(results);
       }
       success = true;
@@ -731,7 +731,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
       } else {
         // convert duplicate increment to get
         List results = region.get(ProtobufUtil.toGet(mutation, cells), false, nonceGroup,
-            nonce);
+          nonce);
         r = Result.create(results);
       }
       success = true;
@@ -2251,7 +2251,6 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
       checkOpen();
       requestCount.increment();
       HRegion region = getRegion(request.getRegion());
-      boolean bypass = false;
       boolean loaded = false;
       Map> map = null;
@@ -2278,15 +2277,13 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
           familyPaths.add(new Pair<>(familyPath.getFamily().toByteArray(), familyPath.getPath()));
         }
         if (region.getCoprocessorHost() != null) {
-          bypass = region.getCoprocessorHost().preBulkLoadHFile(familyPaths);
+          region.getCoprocessorHost().preBulkLoadHFile(familyPaths);
         }
         try {
-          if (!bypass) {
-            map = region.bulkLoadHFiles(familyPaths, request.getAssignSeqNum(), null,
-                request.getCopyFile());
-            if (map != null) {
-              loaded = true;
-            }
+          map = region.bulkLoadHFiles(familyPaths, request.getAssignSeqNum(), null,
+            request.getCopyFile());
+          if (map != null) {
+            loaded = true;
           }
         } finally {
           if (region.getCoprocessorHost() != null) {
@@ -2457,16 +2454,19 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
   private Result get(Get get, HRegion region, RegionScannersCloseCallBack closeCallBack,
       RpcCallContext context) throws IOException {
     region.prepareGet(get);
-    List results = new ArrayList<>();
     boolean stale = region.getRegionInfo().getReplicaId() != 0;
+
+    // This method is almost the same as HRegion#get.
+    List results = new ArrayList<>();
+    long before = EnvironmentEdgeManager.currentTime();
     // pre-get CP hook
     if (region.getCoprocessorHost() != null) {
       if (region.getCoprocessorHost().preGet(get, results)) {
+        region.metricsUpdateForGet(results, before);
         return Result
             .create(results, get.isCheckExistenceOnly() ? !results.isEmpty() : null, stale);
       }
     }
-    long before = EnvironmentEdgeManager.currentTime();
     Scan scan = new Scan(get);
     if (scan.getLoadColumnFamiliesOnDemandValue() == null) {
       scan.setLoadColumnFamiliesOnDemand(region.isLoadingCfsOnDemandDefault());
@@ -2498,6 +2498,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
       region.getCoprocessorHost().postGet(get, results);
     }
     region.metricsUpdateForGet(results, before);
+
     return Result.create(results, get.isCheckExistenceOnly() ? !results.isEmpty() : null, stale);
   }
@@ -2729,11 +2730,10 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
         byte[] qualifier = condition.getQualifier().toByteArray();
         CompareOperator compareOp = CompareOperator.valueOf(condition.getCompareType().name());
-        ByteArrayComparable comparator =
-            ProtobufUtil.toComparator(condition.getComparator());
+        ByteArrayComparable comparator = ProtobufUtil.toComparator(condition.getComparator());
         if (region.getCoprocessorHost() != null) {
-          processed = region.getCoprocessorHost().preCheckAndPut(
-            row, family, qualifier, compareOp, comparator, put);
+          processed = region.getCoprocessorHost().preCheckAndPut(row, family, qualifier,
+            compareOp, comparator, put);
         }
         if (processed == null) {
           boolean result = region.checkAndMutate(row, family,
@@ -2760,11 +2760,10 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
         byte[] family = condition.getFamily().toByteArray();
[3/3] hbase git commit: HBASE-18770 Remove bypass method in ObserverContext and implement the 'bypass' logic case by case
HBASE-18770 Remove bypass method in ObserverContext and implement the 'bypass' logic case by case

Changes the Coprocessor ObserverContext 'bypass' semantic. We flip the default so bypass is NOT supported on Observer invocations; only a couple of preXXX methods in RegionObserver allow it, e.g. preGet and prePut but not preFlush, etc. Everywhere else, we throw a DoesNotSupportBypassException if a Coprocessor Observer tries to invoke bypass. Master Observers can no longer stop or change move, split, assign, create table, etc.

Ditto for 'complete', the mechanism that let a Coprocessor decree that all subsequent Coprocessors in an invocation chain be skipped; now 'complete' is only available to bypassable methods (and Coprocessors get an exception if they try to 'complete' where it is not allowed).

See the javadoc for whether a Coprocessor Observer method supports 'bypass'. If there is no mention, 'bypass' is NOT supported.

M hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java
  Added passing of 'bypassable' (and 'completable') and a default 'result' argument to the Operation constructors rather than passing the execution engine as parameters. This makes it possible to clean up RegionCoprocessorHost and makes the call sites clearer about what is going on. Methods that support 'bypass' must set this flag on the Observer.

M hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
  Refactoring in here is minor. A few methods that used to support bypass no longer do; removing the check and the now-unneeded if/else meant a left-shift in some code.

M hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
  Ditto.

M hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java
  In here we label explicitly those methods that are bypassable, with some changes to make sure we call the corresponding execOperation.

TestMasterObserver had a bunch of tests of the bypass mechanism; all were removed or disabled.

TODO: What to do w/ the Scanner methods.

Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/16012f93
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/16012f93
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/16012f93

Branch: refs/heads/branch-2
Commit: 16012f93a698b8beaa472f102082dc2a414787d3
Parents: 9dfd775
Author: Michael Stack
Authored: Thu Oct 26 14:55:53 2017 -0700
Committer: Michael Stack
Committed: Tue Oct 31 12:49:51 2017 -0700

----------------------------------------------------------------------
 .../coprocessor/BypassCoprocessorException.java |  42 --
 .../apache/hadoop/hbase/coprocessor/Export.java |   4 +-
 .../hbase/coprocessor/CoprocessorHost.java      |  84 +--
 .../hbase/coprocessor/MasterObserver.java       |  57 +-
 .../hbase/coprocessor/ObserverContext.java      |  39 +-
 .../hbase/coprocessor/ObserverContextImpl.java  |  34 ++
 .../hbase/coprocessor/RegionObserver.java       |  13 +-
 .../hadoop/hbase/coprocessor/WALObserver.java   |  14 +-
 .../org/apache/hadoop/hbase/master/HMaster.java |  62 +--
 .../hbase/master/MasterCoprocessorHost.java     | 101 ++--
 .../hadoop/hbase/master/MasterRpcServices.java  |  25 +-
 .../assignment/MergeTableRegionsProcedure.java  |  15 +-
 .../assignment/SplitTableRegionProcedure.java   |   5 +-
 .../hadoop/hbase/regionserver/HRegion.java      | 163 +++---
 .../hadoop/hbase/regionserver/HStore.java       |   9 +-
 .../regionserver/MultiRowMutationProcessor.java |   4 +
 .../hbase/regionserver/RSRpcServices.java       |  39 +-
 .../regionserver/RegionCoprocessorHost.java     | 531 +--
 .../RegionServerCoprocessorHost.java            |  12 +-
 .../regionserver/SecureBulkLoadManager.java     |  75 ++-
 .../hbase/regionserver/wal/AbstractFSWAL.java   |   7 +-
 .../regionserver/wal/WALCoprocessorHost.java    |  30 +-
 .../hbase/security/access/AccessController.java |   6 +-
 .../visibility/VisibilityController.java        |   6 -
 .../hadoop/hbase/TestHRegionLocation.java       |   1 -
 .../coprocessor/SampleRegionWALCoprocessor.java |   6 +-
 .../hbase/coprocessor/TestMasterObserver.java   | 162 +-
 .../hbase/mob/compactions/TestMobCompactor.java |   4 +
 .../hadoop/hbase/regionserver/TestHRegion.java  |  17 +-
 29 files changed, 792 insertions(+), 775 deletions(-)
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hbase/blob/16012f93/hbase-client/src/main/java/org/apache/hadoop/hbase/coprocessor/BypassCoprocessorException.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/coprocessor/BypassCoprocessorException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/coprocessor/BypassCoprocessorException.java
deleted file mode 100644
index 7c59501..000
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/coprocessor/BypassCoprocessor
[2/3] hbase git commit: HBASE-18770 Remove bypass method in ObserverContext and implement the 'bypass' logic case by case
http://git-wip-us.apache.org/repos/asf/hbase/blob/8237fdbd/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
index 0623b2c..517ac3f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
@@ -680,7 +680,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
       } else {
         // convert duplicate append to get
         List results = region.get(ProtobufUtil.toGet(mutation, cellScanner), false,
-            nonceGroup, nonce);
+          nonceGroup, nonce);
         r = Result.create(results);
       }
       success = true;
@@ -731,7 +731,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
       } else {
         // convert duplicate increment to get
         List results = region.get(ProtobufUtil.toGet(mutation, cells), false, nonceGroup,
-            nonce);
+          nonce);
         r = Result.create(results);
       }
       success = true;
@@ -2251,7 +2251,6 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
       checkOpen();
       requestCount.increment();
       HRegion region = getRegion(request.getRegion());
-      boolean bypass = false;
       boolean loaded = false;
       Map> map = null;
@@ -2278,15 +2277,13 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
           familyPaths.add(new Pair<>(familyPath.getFamily().toByteArray(), familyPath.getPath()));
         }
         if (region.getCoprocessorHost() != null) {
-          bypass = region.getCoprocessorHost().preBulkLoadHFile(familyPaths);
+          region.getCoprocessorHost().preBulkLoadHFile(familyPaths);
         }
         try {
-          if (!bypass) {
-            map = region.bulkLoadHFiles(familyPaths, request.getAssignSeqNum(), null,
-                request.getCopyFile());
-            if (map != null) {
-              loaded = true;
-            }
+          map = region.bulkLoadHFiles(familyPaths, request.getAssignSeqNum(), null,
+            request.getCopyFile());
+          if (map != null) {
+            loaded = true;
           }
         } finally {
           if (region.getCoprocessorHost() != null) {
@@ -2457,16 +2454,19 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
   private Result get(Get get, HRegion region, RegionScannersCloseCallBack closeCallBack,
       RpcCallContext context) throws IOException {
     region.prepareGet(get);
-    List results = new ArrayList<>();
     boolean stale = region.getRegionInfo().getReplicaId() != 0;
+
+    // This method is almost the same as HRegion#get.
+    List results = new ArrayList<>();
+    long before = EnvironmentEdgeManager.currentTime();
     // pre-get CP hook
     if (region.getCoprocessorHost() != null) {
       if (region.getCoprocessorHost().preGet(get, results)) {
+        region.metricsUpdateForGet(results, before);
         return Result
             .create(results, get.isCheckExistenceOnly() ? !results.isEmpty() : null, stale);
       }
     }
-    long before = EnvironmentEdgeManager.currentTime();
     Scan scan = new Scan(get);
     if (scan.getLoadColumnFamiliesOnDemandValue() == null) {
       scan.setLoadColumnFamiliesOnDemand(region.isLoadingCfsOnDemandDefault());
@@ -2498,6 +2498,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
       region.getCoprocessorHost().postGet(get, results);
     }
     region.metricsUpdateForGet(results, before);
+
     return Result.create(results, get.isCheckExistenceOnly() ? !results.isEmpty() : null, stale);
   }
@@ -2729,11 +2730,10 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
         byte[] qualifier = condition.getQualifier().toByteArray();
         CompareOperator compareOp = CompareOperator.valueOf(condition.getCompareType().name());
-        ByteArrayComparable comparator =
-            ProtobufUtil.toComparator(condition.getComparator());
+        ByteArrayComparable comparator = ProtobufUtil.toComparator(condition.getComparator());
         if (region.getCoprocessorHost() != null) {
-          processed = region.getCoprocessorHost().preCheckAndPut(
-            row, family, qualifier, compareOp, comparator, put);
+          processed = region.getCoprocessorHost().preCheckAndPut(row, family, qualifier,
+            compareOp, comparator, put);
         }
         if (processed == null) {
           boolean result = region.checkAndMutate(row, family,
@@ -2760,11 +2760,10 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
         byte[] family = condition.getFamily().toByteArray();
[1/3] hbase git commit: HBASE-18770 Remove bypass method in ObserverContext and implement the 'bypass' logic case by case
Repository: hbase
Updated Branches:
  refs/heads/master 63ad16af0 -> 8237fdbd1


http://git-wip-us.apache.org/repos/asf/hbase/blob/8237fdbd/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/compactions/TestMobCompactor.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/compactions/TestMobCompactor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/compactions/TestMobCompactor.java
index e0d9fa2..3f4633a 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/compactions/TestMobCompactor.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/compactions/TestMobCompactor.java
@@ -714,6 +714,9 @@ public class TestMobCompactor {
     while (fileList.length != num) {
       Thread.sleep(50);
       fileList = fs.listStatus(path);
+      for (FileStatus fileStatus: fileList) {
+        LOG.info(fileStatus);
+      }
     }
   }
 
@@ -738,6 +741,7 @@ public class TestMobCompactor {
           candidates.remove(0);
         }
         c.bypass();
+        c.complete();
       }
     }
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/8237fdbd/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
index 268b352..421bd3f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
@@ -359,7 +359,7 @@ public class TestHRegion {
   /**
    * Create a WAL outside of the usual helper in
-   * {@link HBaseTestingUtility#createWal(Configuration, Path, HRegionInfo)} because that method
+   * {@link HBaseTestingUtility#createWal(Configuration, Path, RegionInfo)} because that method
    * doesn't play nicely with FaultyFileSystem. Call this method before overriding
    * {@code fs.file.impl}.
    * @param callingMethod a unique component for the path, probably the name of the test method.
@@ -2386,6 +2386,9 @@ public class TestHRegion {
     FileSystem fs = FileSystem.get(CONF);
     Path rootDir = new Path(dir + "testDataInMemoryWithoutWAL");
     FSHLog hLog = new FSHLog(fs, rootDir, "testDataInMemoryWithoutWAL", CONF);
+    // This chunk creation is done throughout the code base. Do we want to move it into core?
+    // It is missing from this test. W/o it we NPE.
+    ChunkCreator.initialize(MemStoreLABImpl.CHUNK_SIZE_DEFAULT, false, 0, 0, 0, null);
     HRegion region = initHRegion(tableName, null, null, false, Durability.SYNC_WAL, hLog,
         COLUMN_FAMILY_BYTES);
@@ -2433,17 +2436,17 @@ public class TestHRegion {
     // save normalCPHost and replaced by mockedCPHost
     RegionCoprocessorHost normalCPHost = region.getCoprocessorHost();
     RegionCoprocessorHost mockedCPHost = Mockito.mock(RegionCoprocessorHost.class);
-    Answer answer = new Answer() {
+    // Because the preBatchMutate returns void, we can't do usual Mockito when...then form. Must
+    // do below format (from Mockito doc).
+    Mockito.doAnswer(new Answer() {
      @Override
-      public Boolean answer(InvocationOnMock invocation) throws Throwable {
+      public Object answer(InvocationOnMock invocation) throws Throwable {
        MiniBatchOperationInProgress mb = invocation.getArgumentAt(0,
            MiniBatchOperationInProgress.class);
        mb.addOperationsFromCP(0, new Mutation[]{addPut});
-        return false;
+        return null;
      }
-    };
-    when(mockedCPHost.preBatchMutate(Mockito.isA(MiniBatchOperationInProgress.class)))
-      .then(answer);
+    }).when(mockedCPHost).preBatchMutate(Mockito.isA(MiniBatchOperationInProgress.class));
     region.setCoprocessorHost(mockedCPHost);
     region.put(originalPut);
     region.setCoprocessorHost(normalCPHost);
[3/3] hbase git commit: HBASE-18770 Remove bypass method in ObserverContext and implement the 'bypass' logic case by case
HBASE-18770 Remove bypass method in ObserverContext and implement the 'bypass' logic case by case

Changes the Coprocessor ObserverContext 'bypass' semantic. We flip the default so bypass is NOT supported on Observer invocations; only a couple of preXXX methods in RegionObserver allow it, e.g. preGet and prePut but not preFlush, etc. Everywhere else, we throw a DoesNotSupportBypassException if a Coprocessor Observer tries to invoke bypass. Master Observers can no longer stop or change move, split, assign, create table, etc.

Ditto for 'complete', the mechanism that let a Coprocessor decree that all subsequent Coprocessors in an invocation chain be skipped; now 'complete' is only available to bypassable methods (and Coprocessors get an exception if they try to 'complete' where it is not allowed).

See the javadoc for whether a Coprocessor Observer method supports 'bypass'. If there is no mention, 'bypass' is NOT supported.

M hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java
  Added passing of 'bypassable' (and 'completable') and a default 'result' argument to the Operation constructors rather than passing the execution engine as parameters. This makes it possible to clean up RegionCoprocessorHost and makes the call sites clearer about what is going on. Methods that support 'bypass' must set this flag on the Observer.

M hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
  Refactoring in here is minor. A few methods that used to support bypass no longer do; removing the check and the now-unneeded if/else meant a left-shift in some code.

M hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
  Ditto.

M hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java
  In here we label explicitly those methods that are bypassable, with some changes to make sure we call the corresponding execOperation.

TestMasterObserver had a bunch of tests of the bypass mechanism; all were removed or disabled.

TODO: What to do w/ the Scanner methods.

Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/8237fdbd
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/8237fdbd
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/8237fdbd

Branch: refs/heads/master
Commit: 8237fdbd1b044b7ea86986902921f12e9e025b71
Parents: 63ad16a
Author: Michael Stack
Authored: Thu Oct 26 14:55:53 2017 -0700
Committer: Michael Stack
Committed: Tue Oct 31 09:52:20 2017 -0700

----------------------------------------------------------------------
 .../coprocessor/BypassCoprocessorException.java |  42 --
 .../apache/hadoop/hbase/coprocessor/Export.java |   4 +-
 .../hbase/coprocessor/CoprocessorHost.java      |  84 +--
 .../hbase/coprocessor/MasterObserver.java       |  57 +-
 .../hbase/coprocessor/ObserverContext.java      |  39 +-
 .../hbase/coprocessor/ObserverContextImpl.java  |  34 ++
 .../hbase/coprocessor/RegionObserver.java       |  13 +-
 .../hadoop/hbase/coprocessor/WALObserver.java   |  14 +-
 .../org/apache/hadoop/hbase/master/HMaster.java |  62 +--
 .../hbase/master/MasterCoprocessorHost.java     | 101 ++--
 .../hadoop/hbase/master/MasterRpcServices.java  |  25 +-
 .../assignment/MergeTableRegionsProcedure.java  |  15 +-
 .../assignment/SplitTableRegionProcedure.java   |   5 +-
 .../hadoop/hbase/regionserver/HRegion.java      | 163 +++---
 .../hadoop/hbase/regionserver/HStore.java       |   9 +-
 .../regionserver/MultiRowMutationProcessor.java |   4 +
 .../hbase/regionserver/RSRpcServices.java       |  39 +-
 .../regionserver/RegionCoprocessorHost.java     | 531 +--
 .../RegionServerCoprocessorHost.java            |  12 +-
 .../regionserver/SecureBulkLoadManager.java     |  75 ++-
 .../hbase/regionserver/wal/AbstractFSWAL.java   |   7 +-
 .../regionserver/wal/WALCoprocessorHost.java    |  30 +-
 .../hbase/security/access/AccessController.java |   6 +-
 .../visibility/VisibilityController.java        |   6 -
 .../hadoop/hbase/TestHRegionLocation.java       |   1 -
 .../coprocessor/SampleRegionWALCoprocessor.java |   6 +-
 .../hbase/coprocessor/TestMasterObserver.java   | 162 +-
 .../hbase/mob/compactions/TestMobCompactor.java |   4 +
 .../hadoop/hbase/regionserver/TestHRegion.java  |  17 +-
 29 files changed, 792 insertions(+), 775 deletions(-)
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hbase/blob/8237fdbd/hbase-client/src/main/java/org/apache/hadoop/hbase/coprocessor/BypassCoprocessorException.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/coprocessor/BypassCoprocessorException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/coprocessor/BypassCoprocessorException.java
deleted file mode 100644
index 7c59501..000
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/coprocessor/BypassCoprocessorEx
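To make the new contract concrete, here is a hedged sketch of a region coprocessor written against the HBase 2.0-era observer API this commit describes: preGetOp is one of the few hooks that remains bypassable, so the observer may fill the result list itself and call bypass() to skip the core get, while non-bypassable hooks will now reject the same call. The class name and the empty-result behavior are illustrative assumptions, not code from this commit:

import java.io.IOException;
import java.util.List;
import java.util.Optional;

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.RegionObserver;

public class BypassingGetObserver implements RegionCoprocessor, RegionObserver {
  @Override
  public Optional<RegionObserver> getRegionObserver() {
    return Optional.of(this); // expose this class as the RegionObserver
  }

  @Override
  public void preGetOp(ObserverContext<RegionCoprocessorEnvironment> ctx,
      Get get, List<Cell> result) throws IOException {
    // Serve the Get from here (whatever is in 'result' goes back to the
    // client) and skip the core region lookup. Allowed only because preGetOp
    // is marked bypassable; after HBASE-18770 other hooks throw if bypass()
    // is invoked.
    ctx.bypass();
  }
}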
[49/51] [partial] hbase-site git commit: Published site at .
[Generated-site diff elided: book.html picks up "Last updated 2017-10-31 14:29:38 UTC" (was 2017-10-29 14:29:37 UTC) under Version 3.0.0-SNAPSHOT, and bulk-loads.html bumps "Last Published" from 2017-10-29 to 2017-10-31; no substantive content changes.]
[27/51] [partial] hbase-site git commit: Published site at .
[Generated-javadoc diff elided: devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.RegionScannerHolder.html re-renders the RSRpcServices.java source view to match the HBASE-18770 changes above.]
[42/51] [partial] hbase-site git commit: Published site at .
[Generated-javadoc diff elided: the devapidocs class-use pages for Chunk and CompactingMemStore.IndexType, plus regionserver/package-tree.html, pick up new ChunkCreator/MemStoreLAB methods (createChunk and getChunk overloads taking a size, createChunkForPool, getJumboChunk, getNewExternalJumboChunk) for "jumbo" chunks bigger than the regular chunk size.]
[28/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/35decbe4/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.RegionScannerCloseCallBack.html

diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.RegionScannerCloseCallBack.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.RegionScannerCloseCallBack.html
index 9761535..a4d2378 100644
@@ -1145,2347 +1145,2348 @@

The hunk renumbers the generated source view of RSRpcServices (one line was added
upstream); the paired -/+ lines carry identical text. The visible context, cleaned up,
is the tail of the large-batch warning and the RSRpcServices constructor:

      if (LOG.isWarnEnabled()) {
        LOG.warn("Large batch operation detected (greater than " + rowSizeWarnThreshold
            + ") (HBASE-18023)." + " Requested Number of Rows: " + sum + " Client: "
            + RpcServer.getRequestUserName() + "/" + RpcServer.getRemoteAddress()
            + " first region in multi=" + firstRegionName);
      }
    }
  };

  private final LogDelegate ld;

  public RSRpcServices(HRegionServer rs) throws IOException {
    this(rs, DEFAULT_LOG_DELEGATE);
  }

  // Directly invoked only for testing
  RSRpcServices(HRegionServer rs, LogDelegate ld) throws IOException {
    this.ld = ld;
    regionServer = rs;
    rowSizeWarnThreshold = rs.conf.getInt(BATCH_ROWS_THRESHOLD_NAME, BATCH_ROWS_THRESHOLD_DEFAULT);
    RpcSchedulerFactory rpcSchedulerFactory;
    try {
      Class<?> rpcSchedulerFactoryClass = rs.conf.getClass(
          REGION_SERVER_RPC_SCHEDULER_FACTORY_CLASS,
          SimpleRpcSchedulerFactory.class);
      rpcSchedulerFactory = ((RpcSchedulerFactory) rpcSchedulerFactoryClass.newInstance());
    } catch (InstantiationException e) {
      throw new IllegalArgumentException(e);
    } catch (IllegalAccessException e) {
      throw new IllegalArgumentException(e);
    }
    // Server to handle client requests.
    InetSocketAddress initialIsa;
    InetSocketAddress bindAddress;
    if (this instanceof MasterRpcServices) {
      String hostname = getHostname(rs.conf, true);
      int port = rs.conf.getInt(HConstants.MASTER_PORT, HConstants.DEFAULT_MASTER_PORT);
      // Creation of a HSA will force a resolve.
      initialIsa = new InetSocketAddress(hostname, port);
      bindAddress = new InetSocketAddress(rs.conf.get("hbase.master.ipc.address", hostname), port);
    } else {
      String hostname = getHostname(rs.conf, false);
      int port = rs.conf.getInt(HConstants.REGIONSERVER_PORT,
          HConstants.DEFAULT_REGIONSERVER_PORT);
      // Creation of a HSA will force a resolve.
      initialIsa = new InetSocketAddress(hostname, port);
      bindAddress = new InetSocketAddress(
          rs.conf.get("hbase.regionserver.ipc.address", hostname), port);
    }
    if (initialIsa.getAddress() == null) {
      throw new IllegalArgumentException("Failed resolve of " + initialIsa);
    }
    priority = createPriority();
    String name = rs.getProcessName() + "/" + initialIsa.toString();
    // Set how many times to retry talking to another server over Connection.
    ConnectionUtils.setServerSideHConnectionRetriesConfig(rs.conf, name, LOG);
    try {
      rpcServer = RpcServerFactory.createRpcServer(rs, name, getServices(),
          bindAddress, // use final bindAddress for this server.
          rs.conf,
          rpcSchedulerFactory.create(rs.conf, this, rs));
      rpcServer.setRsRpcServices(this);
    } catch (BindException be) {
      String configName = (this instanceof MasterRpcServices) ? HConstants.MASTER_PORT :
          HConstants.REGIONSERVER_PORT;
      throw new IOException(be.getMessage() + ". To switch ports use the '" + configName +
          "' configuration property.", be.getCause() != null ? be.getCause() : be);
    }

The constructor then reads scannerLeaseTimeoutPeriod, maxScannerResultSize, rpcTimeout
and minimumScanTimeLimitDelta from the usual HConstants keys and asks the RPC server
for its listener address, where the excerpt breaks off.
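The scheduler lookup in the constructor above is the stock Hadoop pluggable-class
idiom: read a class name from the Configuration, fall back to a default, and
instantiate it reflectively. A minimal standalone sketch of that idiom, assuming only
org.apache.hadoop.conf.Configuration; the factory interface, key and class names below
are hypothetical stand-ins, not the HBase types:

    import org.apache.hadoop.conf.Configuration;

    interface SchedulerFactory {            // stand-in for RpcSchedulerFactory
      String describe();
    }

    class SimpleSchedulerFactory implements SchedulerFactory {
      @Override
      public String describe() { return "simple"; }
    }

    public class FactoryLoader {
      // hypothetical configuration key
      static final String FACTORY_KEY = "example.rpc.scheduler.factory.class";

      static SchedulerFactory load(Configuration conf) {
        // conf.getClass(key, default) resolves the configured class name and
        // falls back to the default when the key is unset.
        Class<?> clazz = conf.getClass(FACTORY_KEY, SimpleSchedulerFactory.class);
        try {
          return (SchedulerFactory) clazz.newInstance();
        } catch (InstantiationException | IllegalAccessException e) {
          // Mirror the source: surface reflective failures as config errors.
          throw new IllegalArgumentException(e);
        }
      }

      public static void main(String[] args) {
        System.out.println(load(new Configuration(false)).describe()); // -> "simple"
      }
    }

Rethrowing the reflective failures as IllegalArgumentException, as the constructor
does, turns a bad class name into a fail-fast configuration error instead of a
half-initialized server.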
[24/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/35decbe4/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.ScannerListener.html

diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.ScannerListener.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.ScannerListener.html
index 9761535..a4d2378 100644
@@ -1145,2347 +1145,2348 @@

Same hunk as the RSRpcServices.RegionScannerCloseCallBack page above; the inner-class
pages share one generated source view of RSRpcServices.
[25/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/35decbe4/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.RegionScannersCloseCallBack.html

diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.RegionScannersCloseCallBack.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.RegionScannersCloseCallBack.html
index 9761535..a4d2378 100644
@@ -1145,2347 +1145,2348 @@

Same hunk as the RSRpcServices.RegionScannerCloseCallBack page above; the inner-class
pages share one generated source view of RSRpcServices.
[09/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/35decbe4/testdevapidocs/org/apache/hadoop/hbase/replication/regionserver/TestReplicator.ReplicationEndpointForTest.html

diff --git a/testdevapidocs/org/apache/hadoop/hbase/replication/regionserver/TestReplicator.ReplicationEndpointForTest.html b/testdevapidocs/org/apache/hadoop/hbase/replication/regionserver/TestReplicator.ReplicationEndpointForTest.html
index 0d9e627..cb42b92 100644

The hunk only re-anchors the member links of the generated page to the renumbered
source of TestReplicator.ReplicationEndpointForTest, a test double extending
org.apache.hadoop.hbase.replication.regionserver.HBaseInterClusterReplicationEndpoint.
The members touched are the static fields batchCount and entriesCount (int), latch
(a final Object) and useLatch (an AtomicBoolean), the constructor, and the methods
resume(), pause(), await() (throws InterruptedException), getBatchCount() /
setBatchCount(int), getEntriesCount() / setEntriesCount(int),
replicate(ReplicationEndpoint.ReplicateContext), and createReplicator(List...);
their signatures are unchanged.
[10/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/35decbe4/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.html

diff --git a/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.html b/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.html
index 9a78d0e..044700b 100644

Two substantive changes show through the regenerated page for TestCompactingMemStore
(extends TestDefaultMemStore, the "compacted memstore test case"): the method count in
the page's JavaScript tab index drops from twenty to nineteen because
testFlatteningToCellChunkMap() is no longer declared here, and
addRowsByKeys(AbstractMemStore, String[]) widens from private int to protected int.
The remaining diff lines re-anchor unchanged members to the renumbered source: the
fields LOG, chunkCreator, region, regionServicesForStores and store, the constructor,
makeQualifier(int, int), setUp()/tearDown()/compactingSetUp(), and the test methods
from testGet_memstoreAndSnapShot() through testUpsertMemstoreSize(), where the
excerpt breaks off.
[18/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/35decbe4/devapidocs/src-html/org/apache/hadoop/hbase/snapshot/ExportSnapshot.ExportSnapshotInputFormat.ExportSnapshotRecordReader.html

diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/snapshot/ExportSnapshot.ExportSnapshotInputFormat.ExportSnapshotRecordReader.html b/devapidocs/src-html/org/apache/hadoop/hbase/snapshot/ExportSnapshot.ExportSnapshotInputFormat.ExportSnapshotRecordReader.html
index 87257da..add30e1 100644
@@ -116,1006 +116,1008 @@

The hunk renumbers the generated source view of ExportSnapshot (two lines were added
upstream); the paired -/+ lines carry identical text. The visible context, cleaned up:

    private static final String CONF_BUFFER_SIZE = "snapshot.export.buffer.size";
    private static final String CONF_MAP_GROUP = "snapshot.export.default.map.group";
    private static final String CONF_BANDWIDTH_MB = "snapshot.export.map.bandwidth.mb";
    protected static final String CONF_SKIP_TMP = "snapshot.export.skip.tmp";

    static class Testing {
      static final String CONF_TEST_FAILURE = "test.snapshot.export.failure";
      static final String CONF_TEST_FAILURE_COUNT = "test.snapshot.export.failure.count";
      int failuresCountToInject = 0;
      int injectedFailureCount = 0;
    }

    // Command line options and defaults.
    static final class Options {
      static final Option SNAPSHOT = new Option(null, "snapshot", true, "Snapshot to restore.");
      static final Option TARGET_NAME = new Option(null, "target", true,
          "Target name for the snapshot.");
      static final Option COPY_TO = new Option(null, "copy-to", true, "Remote "
          + "destination hdfs://");
      static final Option COPY_FROM = new Option(null, "copy-from", true,
          "Input folder hdfs:// (default hbase.rootdir)");
      static final Option NO_CHECKSUM_VERIFY = new Option(null, "no-checksum-verify", false,
          "Do not verify checksum, use name+length only.");
      static final Option NO_TARGET_VERIFY = new Option(null, "no-target-verify", false,
          "Do not verify the integrity of the exported snapshot.");
      static final Option OVERWRITE = new Option(null, "overwrite", false,
          "Rewrite the snapshot manifest if already exists.");
      static final Option CHUSER = new Option(null, "chuser", true,
          "Change the owner of the files to the specified one.");
      static final Option CHGROUP = new Option(null, "chgroup", true,
          "Change the group of the files to the specified one.");
      static final Option CHMOD = new Option(null, "chmod", true,
          "Change the permission of the files to the specified one.");
      static final Option MAPPERS = new Option(null, "mappers", true,
          "Number of mappers to use during the copy (mapreduce.job.maps).");
      static final Option BANDWIDTH = new Option(null, "bandwidth", true,
          "Limit bandwidth to this value in MB/second.");
    }

    // Export Map-Reduce Counters, to keep track of the progress
    public enum Counter {
      MISSING_FILES, FILES_COPIED, FILES_SKIPPED, COPY_FAILED,
      BYTES_EXPECTED, BYTES_SKIPPED, BYTES_COPIED
    }

The rest of the visible context is the private ExportMapper class (its generic
parameters were eaten by the HTML export): the REPORT_SIZE and BUFFER_SIZE constants,
the per-task fields (verifyChecksum, filesGroup, filesUser, filesMode, bufferSize, the
input/output FileSystem, archive and root Paths, and a static Testing instance), and a
setup(Context) override that builds source and destination cluster Configurations via
HBaseConfiguration.createClusterConf and reads the CONF_* keys, breaking off
mid-statement at the assignment of inputRoot.
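The Options class above is plain Apache commons-cli. A small self-contained sketch of
how such long-only options are declared and parsed; the option names mirror the
source, but the parsing code around them is illustrative, not code from
ExportSnapshot:

    import org.apache.commons.cli.CommandLine;
    import org.apache.commons.cli.DefaultParser;
    import org.apache.commons.cli.Option;
    import org.apache.commons.cli.Options;
    import org.apache.commons.cli.ParseException;

    public class ExportArgsDemo {
      public static void main(String[] args) throws ParseException {
        Options opts = new Options();
        // Option(shortName, longName, hasArg, description): long-only options
        // pass null for the short name, as the snapshot export tool does.
        opts.addOption(new Option(null, "snapshot", true, "Snapshot to restore."));
        opts.addOption(new Option(null, "copy-to", true, "Remote destination hdfs://"));
        opts.addOption(new Option(null, "overwrite", false,
            "Rewrite the snapshot manifest if already exists."));

        CommandLine cmd = new DefaultParser().parse(opts,
            new String[] { "--snapshot", "snap1", "--overwrite" });

        System.out.println(cmd.getOptionValue("snapshot")); // snap1
        System.out.println(cmd.hasOption("overwrite"));     // true
      }
    }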
[01/51] [partial] hbase-site git commit: Published site at .
Repository: hbase-site Updated Branches: refs/heads/asf-site 26b2bd6f9 -> 35decbe40

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/35decbe4/testdevapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/TestReplicator.ReplicationEndpointForTest.ReplicatorForTest.html

diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/TestReplicator.ReplicationEndpointForTest.ReplicatorForTest.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/TestReplicator.ReplicationEndpointForTest.ReplicatorForTest.html
index c97fa47..3b16d00 100644
@@ -56,379 +56,381 @@

The only source-level change in this hunk is a new import of org.junit.Ignore; the
rest renumbers the generated view. The visible context, cleaned up:

    import org.junit.AfterClass;
    import org.junit.BeforeClass;
    import org.junit.Test;
    import org.junit.Ignore;
    import org.junit.experimental.categories.Category;

    @Category(MediumTests.class)
    public class TestReplicator extends TestReplicationBase {

      static final Log LOG = LogFactory.getLog(TestReplicator.class);
      static final int NUM_ROWS = 10;

      @BeforeClass
      public static void setUpBeforeClass() throws Exception {
        // Set RPC size limit to 10kb (will be applied to both source and sink clusters)
        conf1.setInt(RpcServer.MAX_REQUEST_SIZE, 1024 * 10);
        TestReplicationBase.setUpBeforeClass();
        admin.removePeer("2"); // Remove the peer set up for us by base class
      }

      @Test
      public void testReplicatorBatching() throws Exception {
        // Clear the tables
        truncateTable(utility1, tableName);
        truncateTable(utility2, tableName);

        // Replace the peer set up for us by the base class with a wrapper for this test
        admin.addPeer("testReplicatorBatching",
            new ReplicationPeerConfig().setClusterKey(utility2.getClusterKey())
                .setReplicationEndpointImpl(ReplicationEndpointForTest.class.getName()), null);

        ReplicationEndpointForTest.setBatchCount(0);
        ReplicationEndpointForTest.setEntriesCount(0);
        try {
          ReplicationEndpointForTest.pause();
          try {
            // Queue up a bunch of cells of size 8K. Because of RPC size limits, they will all
            // have to be replicated separately.
            final byte[] valueBytes = new byte[8 * 1024];
            for (int i = 0; i < NUM_ROWS; i++) {
              htable1.put(new Put(("row" + Integer.toString(i)).getBytes())
                  .addColumn(famName, null, valueBytes));
            }
          } finally {
            ReplicationEndpointForTest.resume();
          }

          // Wait for replication to complete.
          Waiter.waitFor(conf1, 6, new Waiter.ExplainingPredicate() {
            @Override
            public boolean evaluate() throws Exception {
              return ReplicationEndpointForTest.getBatchCount() >= NUM_ROWS;
            }

            @Override
            public String explainFailure() throws Exception {
              return "We waited too long for expected replication of " + NUM_ROWS + " entries";
            }
          });

          assertEquals("We sent an incorrect number of batches", NUM_ROWS,
              ReplicationEndpointForTest.getBatchCount());
          assertEquals("We did not replicate enough rows", NUM_ROWS,
              utility2.countRows(htable2));
        } finally {
          admin.removePeer("testReplicatorBatching");
        }
      }

testReplicatorWithErrors() follows the same shape, registering
FailureInjectingReplicationEndpointForTest as the peer endpoint instead; the excerpt
breaks off inside its put loop.
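The pause()/resume()/await() calls gating the endpoint above form a classic
monitor-based latch; the Javadoc page earlier in this batch shows the endpoint carries
a static Object latch and an AtomicBoolean useLatch for exactly this. A standalone
sketch of that gate, assuming plain Java; the class and the driver in main are
illustrative, not the HBase test double:

    import java.util.concurrent.atomic.AtomicBoolean;

    public class Gate {
      private static final Object latch = new Object();
      private static final AtomicBoolean useLatch = new AtomicBoolean(false);

      public static void pause() {            // arm the gate
        useLatch.set(true);
      }

      public static void resume() {           // release all waiters
        synchronized (latch) {
          useLatch.set(false);
          latch.notifyAll();
        }
      }

      public static void await() throws InterruptedException {
        synchronized (latch) {
          while (useLatch.get()) {            // loop guards against spurious wakeups
            latch.wait();
          }
        }
      }

      public static void main(String[] args) throws Exception {
        pause();
        Thread worker = new Thread(() -> {
          try {
            await();                          // blocks until resume()
            System.out.println("replicating");
          } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
          }
        });
        worker.start();
        Thread.sleep(100);                    // let the worker park
        resume();
        worker.join();
      }
    }

Arming the gate before the puts and releasing it afterwards is what lets the test
count whole batches deterministically instead of racing the replicator.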
[33/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/35decbe4/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/Chunk.html

diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/Chunk.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/Chunk.html
index 073d380..9b39b6a 100644
@@ -93,94 +93,98 @@

The hunk adds an isJumbo() check to Chunk and renumbers the rest. The new state of the
visible context, cleaned up:

    return this.fromPool;
  }

  boolean isJumbo() {
    return size > ChunkCreator.getInstance().getChunkSize();
  }

  /**
   * Actually claim the memory for this chunk. This should only be called from the thread that
   * constructed the chunk. It is thread-safe against other threads calling alloc(), who will block
   * until the allocation is complete.
   */
  public void init() {
    assert nextFreeOffset.get() == UNINITIALIZED;
    try {
      allocateDataBuffer();
    } catch (OutOfMemoryError e) {
      boolean failInit = nextFreeOffset.compareAndSet(UNINITIALIZED, OOM);
      assert failInit; // should be true.
      throw e;
    }
    // Mark that it's ready for use
    // Move 4 bytes since the first 4 bytes are having the chunkid in it
    boolean initted = nextFreeOffset.compareAndSet(UNINITIALIZED, Bytes.SIZEOF_INT);
    // We should always succeed the above CAS since only one thread
    // calls init()!
    Preconditions.checkState(initted, "Multiple threads tried to init same chunk");
  }

  abstract void allocateDataBuffer();

  /**
   * Reset the offset to UNINITIALIZED before reusing an old chunk
   */
  void reset() {
    if (nextFreeOffset.get() != UNINITIALIZED) {
      nextFreeOffset.set(UNINITIALIZED);
      allocCount.set(0);
    }
  }

  /**
   * Try to allocate size bytes from the chunk.
   * If a chunk is tried to get allocated before init() call, the thread doing the allocation
   * will be in busy-wait state as it will keep looping till the nextFreeOffset is set.
   * @return the offset of the successful allocation, or -1 to indicate not-enough-space
   */
  public int alloc(int size) {
    while (true) {
      int oldOffset = nextFreeOffset.get();
      if (oldOffset == UNINITIALIZED) {
        // The chunk doesn't have its data allocated yet.
        // Since we found this in curChunk, we know that whoever
        // CAS-ed it there is allocating it right now. So spin-loop
        // shouldn't spin long!
        Thread.yield();
        continue;
      }
      if (oldOffset == OOM) {
        // doh we ran out of ram. return -1 to chuck this away.
        return -1;

The excerpt breaks off inside the CAS retry loop.
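alloc() above is a bump-the-pointer allocator: one AtomicInteger offset advanced by
compare-and-set, with sentinel offsets standing in for "not initialized yet" and
"allocation failed". A runnable reduction of that loop, with illustrative constants;
this is a sketch of the pattern, not the HBase Chunk class:

    import java.util.concurrent.atomic.AtomicInteger;

    public class BumpChunk {
      static final int UNINITIALIZED = -1;
      static final int OOM = -2;
      final int size = 4096;                       // chunk capacity in bytes
      final AtomicInteger nextFreeOffset = new AtomicInteger(UNINITIALIZED);

      void init() {
        // One thread publishes the chunk; 4 bytes are reserved up front
        // (the real chunk stores its id there).
        nextFreeOffset.compareAndSet(UNINITIALIZED, Integer.BYTES);
      }

      /** @return offset of the allocation, or -1 if there is not enough space */
      int alloc(int want) {
        while (true) {
          int old = nextFreeOffset.get();
          if (old == UNINITIALIZED) {
            Thread.yield();                        // init() is in flight; spin briefly
            continue;
          }
          if (old == OOM || old + want > size) {
            return -1;                             // not enough space left
          }
          if (nextFreeOffset.compareAndSet(old, old + want)) {
            return old;                            // we own [old, old + want)
          }
          // CAS lost to a concurrent alloc(); retry with the fresh offset.
        }
      }

      public static void main(String[] args) {
        BumpChunk c = new BumpChunk();
        c.init();
        System.out.println(c.alloc(100));          // 4
        System.out.println(c.alloc(100));          // 104
      }
    }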
hbase git commit: HBASE-19100 Missing break in catch block of InterruptedException in HRegion#waitForFlushesAndCompactions
Repository: hbase Updated Branches: refs/heads/master bbb7e1924 -> 63ad16af0 HBASE-19100 Missing break in catch block of InterruptedException in HRegion#waitForFlushesAndCompactions Project: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/63ad16af Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/63ad16af Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/63ad16af Branch: refs/heads/master Commit: 63ad16af0c3544c335646b4d97dbea82c02ee849 Parents: bbb7e19 Author: tedyu Authored: Tue Oct 31 08:17:57 2017 -0700 Committer: tedyu Committed: Tue Oct 31 08:17:57 2017 -0700 -- .../src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java | 1 + 1 file changed, 1 insertion(+) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/63ad16af/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java -- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java index 5f53fff..e8dd2cd 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java @@ -1759,6 +1759,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi // essentially ignore and propagate the interrupt back up LOG.warn("Interrupted while waiting"); interrupted = true; +break; } } } finally {
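To see why the single added break matters, here is a standalone reduction of the wait
loop; the class, lock and inflight condition are illustrative, not HRegion itself.
Without the break, an interrupted waiter re-enters wait() and can hang until the
condition clears; with it, the waiter records the interrupt and returns promptly,
restoring the flag in the finally block just as HRegion does:

    public class WaitLoop {
      private final Object lock = new Object();
      private volatile int inflight = 1;           // pretend one compaction is running

      void waitForFlushesAndCompactions() {
        boolean interrupted = false;
        try {
          synchronized (lock) {
            while (inflight > 0) {
              try {
                lock.wait();
              } catch (InterruptedException e) {
                // essentially ignore and propagate the interrupt back up
                interrupted = true;
                break;                             // the HBASE-19100 fix
              }
            }
          }
        } finally {
          if (interrupted) {
            Thread.currentThread().interrupt();    // restore the flag for callers
          }
        }
      }

      public static void main(String[] args) throws Exception {
        WaitLoop w = new WaitLoop();
        Thread t = new Thread(w::waitForFlushesAndCompactions);
        t.start();
        Thread.sleep(100);
        t.interrupt();                             // returns promptly thanks to break
        t.join(1000);
        System.out.println("alive=" + t.isAlive()); // alive=false
      }
    }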
[37/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/35decbe4/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/RestoreTablesClient.html

diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/RestoreTablesClient.html b/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/RestoreTablesClient.html
index 358baa9..8e0c134 100644
@@ -26,264 +26,269 @@

The only source-level change in this hunk is one added line at the top of
RestoreTablesClient:

    import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.JOB_NAME_CONF_KEY;

Everything else is unchanged and merely renumbered: the java.util, commons and hadoop
imports, the @InterfaceAudience.Private class javadoc ("Restore table
implementation"), the fields conf, conn, backupId, sTableArray, tTableArray,
targetRootDir and isOverwrite, and the constructor
RestoreTablesClient(Connection, RestoreRequest), which defaults tTableArray to
sTableArray when no target tables are given. The excerpt breaks off inside the
constructor.
[15/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/35decbe4/devapidocs/src-html/org/apache/hadoop/hbase/snapshot/ExportSnapshot.Testing.html

diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/snapshot/ExportSnapshot.Testing.html b/devapidocs/src-html/org/apache/hadoop/hbase/snapshot/ExportSnapshot.Testing.html
index 87257da..add30e1 100644
@@ -116,1006 +116,1008 @@

Same hunk as the ExportSnapshot.ExportSnapshotInputFormat.ExportSnapshotRecordReader
page above; the inner-class pages share one generated source view of ExportSnapshot.
[08/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/35decbe4/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestCellFlatSet.html

diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestCellFlatSet.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestCellFlatSet.html
index 3925972..c87c7d9 100644

The hunk reworks TestCellFlatSet's parameterized setup: a SMALL_CHUNK_SIZE constant
and a smallChunks flag replace the ad-hoc literal 64, and the multi-chunk CellChunkMap
fields (ascMultCCM, descMultCCM) are dropped. The new state of the visible context,
cleaned up:

    return new Object[] { "SMALL_CHUNKS", "NORMAL_CHUNKS" }; // test with different chunk sizes
  }

  private static final int NUM_OF_CELLS = 4;
  private static final int SMALL_CHUNK_SIZE = 64;
  private Cell ascCells[];
  private CellArrayMap ascCbOnHeap;
  private Cell descCells[];
  private CellArrayMap descCbOnHeap;
  private final static Configuration CONF = new Configuration();
  private KeyValue lowerOuterCell;
  private KeyValue upperOuterCell;

  private CellChunkMap ascCCM;  // for testing ascending CellChunkMap with one chunk in array
  private CellChunkMap descCCM; // for testing descending CellChunkMap with one chunk in array
  private final boolean smallChunks;
  private static ChunkCreator chunkCreator;

and, in the class setup:

      chunkCreator = ChunkCreator.initialize(MemStoreLABImpl.CHUNK_SIZE_DEFAULT, false,
          globalMemStoreLimit, 0.2f, MemStoreLAB.POOL_INITIAL_SIZE_DEFAULT, null);
      assertTrue(chunkCreator != null);
      smallChunks = false;
    } else {
      // chunkCreator with smaller chunk size, so only 3 cell-representations fit in a chunk
      chunkCreator = ChunkCreator.initialize(SMALL_CHUNK_SIZE, false,
          globalMemStoreLimit, 0.2f, MemStoreLAB.POOL_INITIAL_SIZE_DEFAULT, null);
      assertTrue(chunkCreator != null);
      smallChunks = true;
    }
  }

The setUp() override that follows still builds the four ascending/descending
KeyValues, the lower/upper outer cells, and the CellArrayMaps exactly as before,
enables MSLAB in CONF, and creates the ascending and descending CellChunkMaps; the
excerpt breaks off early in that method.
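The "SMALL_CHUNKS"/"NORMAL_CHUNKS" pair feeds a parameterized runner, so every test in
the class executes once per chunk size. A hypothetical reduction of that setup to a
standalone JUnit 4 test; the sizes and the assertion are illustrative, not the HBase
test:

    import static org.junit.Assert.assertTrue;

    import org.junit.Test;
    import org.junit.runner.RunWith;
    import org.junit.runners.Parameterized;
    import org.junit.runners.Parameterized.Parameters;

    @RunWith(Parameterized.class)
    public class ChunkSizeParamTest {
      private static final int SMALL_CHUNK_SIZE = 64;
      private static final int NORMAL_CHUNK_SIZE = 2 * 1024 * 1024; // illustrative

      private final int chunkSize;

      public ChunkSizeParamTest(String mode) {
        this.chunkSize = "SMALL_CHUNKS".equals(mode) ? SMALL_CHUNK_SIZE : NORMAL_CHUNK_SIZE;
      }

      @Parameters(name = "{0}")                    // test with different chunk sizes
      public static Object[] data() {
        return new Object[] { "SMALL_CHUNKS", "NORMAL_CHUNKS" };
      }

      @Test
      public void chunkHoldsAtLeastOneCell() {
        assertTrue(chunkSize >= SMALL_CHUNK_SIZE); // trivial stand-in assertion
      }
    }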
[35/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/35decbe4/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html

diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html
index b4165bf..dec7409 100644
@@ -1560,1985 +1560,1986 @@

The hunk renumbers the generated source view of HMaster (one line was added upstream);
the paired -/+ lines carry identical text. The visible context, cleaned up:

  public String getClientIdAuditPrefix() {
    return "Client=" + RpcServer.getRequestUserName() + "/" + RpcServer.getRemoteAddress();
  }

  /**
   * Switch for the background CatalogJanitor thread.
   * Used for testing. The thread will continue to run. It will just be a noop
   * if disabled.
   * @param b If false, the catalog janitor won't do anything.
   */
  public void setCatalogJanitorEnabled(final boolean b) {
    this.catalogJanitorChore.setEnabled(b);
  }

  @Override
  public long mergeRegions(
      final RegionInfo[] regionsToMerge,
      final boolean forcible,
      final long nonceGroup,
      final long nonce) throws IOException {
    checkInitialized();

    assert (regionsToMerge.length == 2);

    TableName tableName = regionsToMerge[0].getTable();
    if (tableName == null || regionsToMerge[1].getTable() == null) {
      throw new UnknownRegionException("Can't merge regions without table associated");
    }

    if (!tableName.equals(regionsToMerge[1].getTable())) {
      throw new IOException(
          "Cannot merge regions from two different tables " + regionsToMerge[0].getTable()
              + " and " + regionsToMerge[1].getTable());
    }

    if (RegionInfo.COMPARATOR.compare(regionsToMerge[0], regionsToMerge[1]) == 0) {
      throw new MergeRegionException(
          "Cannot merge a region to itself " + regionsToMerge[0] + ", " + regionsToMerge[1]);
    }

    return MasterProcedureUtil.submitProcedure(
        new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) {
          @Override
          protected void run() throws IOException {
            getMaster().getMasterCoprocessorHost().preMergeRegions(regionsToMerge);

            LOG.info(getClientIdAuditPrefix() + " Merge regions " +
                regionsToMerge[0].getEncodedName() + " and " + regionsToMerge[1].getEncodedName());

            submitProcedure(new MergeTableRegionsProcedure(procedureExecutor.getEnvironment(),
                regionsToMerge, forcible));

            getMaster().getMasterCoprocessorHost().postMergeRegions(regionsToMerge);
          }

          @Override
          protected String getDescription() {
            return "MergeTableProcedure";
          }
        });
  }

splitRegion() follows the same NonceProcedureRunnable pattern (preSplitRegion
coprocessor hook, audit log line, then an asynchronous
getAssignmentManager().createSplitProcedure(regionInfo, splitRow) submission, described
as "SplitTableProcedure"). After it, move() (public so tests can call it; it blocks
until the move is done) resolves the RegionState for the encoded region name, throwing
UnknownRegionException when absent; the excerpt breaks off while it is choosing a
destination server.
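mergeRegions() front-loads three cheap validations before any procedure is submitted:
both regions must resolve to a table, the tables must match, and a region cannot merge
with itself. A standalone sketch of those checks; RegionRef is an illustrative
stand-in for RegionInfo, and the exception types are simplified:

    import java.io.IOException;
    import java.util.Objects;

    public class MergeChecks {
      static final class RegionRef {
        final String table;
        final String encodedName;
        RegionRef(String table, String encodedName) {
          this.table = table;
          this.encodedName = encodedName;
        }
      }

      static void validate(RegionRef[] regionsToMerge) throws IOException {
        assert regionsToMerge.length == 2;
        if (regionsToMerge[0].table == null || regionsToMerge[1].table == null) {
          throw new IOException("Can't merge regions without table associated");
        }
        if (!regionsToMerge[0].table.equals(regionsToMerge[1].table)) {
          throw new IOException("Cannot merge regions from two different tables "
              + regionsToMerge[0].table + " and " + regionsToMerge[1].table);
        }
        if (Objects.equals(regionsToMerge[0].encodedName, regionsToMerge[1].encodedName)) {
          throw new IOException("Cannot merge a region to itself");
        }
      }

      public static void main(String[] args) throws IOException {
        validate(new RegionRef[] {
            new RegionRef("t1", "a"), new RegionRef("t1", "b") }); // passes
        try {
          validate(new RegionRef[] {
              new RegionRef("t1", "a"), new RegionRef("t2", "b") });
        } catch (IOException e) {
          System.out.println(e.getMessage());      // two different tables
        }
      }
    }

Failing fast here keeps invalid requests from ever reaching the procedure executor,
where a rollback would be far more expensive.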
[30/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/35decbe4/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/ImmutableMemStoreLAB.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/ImmutableMemStoreLAB.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/ImmutableMemStoreLAB.html index 0f71c4c..bc0c1ea 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/ImmutableMemStoreLAB.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/ImmutableMemStoreLAB.html @@ -53,60 +53,79 @@ 045throw new IllegalStateException("This is an Immutable MemStoreLAB."); 046 } 047 -048 @Override -049 // returning a new chunk, without replacing current chunk, -050 // the space on this chunk will be allocated externally -051 // use the first MemStoreLABImpl in the list -052 public Chunk getNewExternalChunk() { -053MemStoreLAB mslab = this.mslabs.get(0); -054return mslab.getNewExternalChunk(); -055 } -056 -057 @Override -058 public void close() { -059// 'openScannerCount' here tracks the scanners opened on segments which directly refer to this -060// MSLAB. The individual MSLABs this refers also having its own 'openScannerCount'. The usage of -061// the variable in close() and decScannerCount() is as as that in HeapMemstoreLAB. Here the -062// close just delegates the call to the individual MSLABs. The actual return of the chunks to -063// MSLABPool will happen within individual MSLABs only (which is at the leaf level). -064// Say an ImmutableMemStoreLAB is created over 2 HeapMemStoreLABs at some point and at that time -065// both of them were referred by ongoing scanners. So they have > 0 'openScannerCount'. Now over -066// the new Segment some scanners come in and this MSLABs 'openScannerCount' also goes up and -067// then come down on finish of scanners. Now a close() call comes to this Immutable MSLAB. As -068// it's 'openScannerCount' is zero it will call close() on both of the Heap MSLABs. Say by that -069// time the old scanners on one of the MSLAB got over where as on the other, still an old -070// scanner is going on. The call close() on that MSLAB will not close it immediately but will -071// just mark it for closure as it's 'openScannerCount' still > 0. Later once the old scan is -072// over, the decScannerCount() call will do the actual close and return of the chunks. -073this.closed = true; -074// When there are still on going scanners over this MSLAB, we will defer the close until all -075// scanners finish. We will just mark it for closure. See #decScannerCount(). This will be -076// called at end of every scan. When it is marked for closure and scanner count reached 0, we -077// will do the actual close then. -078 checkAndCloseMSLABs(openScannerCount.get()); -079 } -080 -081 private void checkAndCloseMSLABs(int openScanners) { -082if (openScanners == 0) { -083 for (MemStoreLAB mslab : this.mslabs) { -084mslab.close(); -085 } -086} -087 } -088 -089 @Override -090 public void incScannerCount() { -091 this.openScannerCount.incrementAndGet(); -092 } -093 -094 @Override -095 public void decScannerCount() { -096int count = this.openScannerCount.decrementAndGet(); -097if (this.closed) { -098 checkAndCloseMSLABs(count); -099} -100 } -101} +048 /* Creating chunk to be used as index chunk in CellChunkMap, part of the chunks array. +049 ** Returning a new chunk, without replacing current chunk, +050 ** meaning MSLABImpl does not make the returned chunk as CurChunk. +051 ** The space on this chunk will be allocated externally. 
+052 ** The interface is only for external callers. +053 */ +054 @Override +055 public Chunk getNewExternalChunk() { +056MemStoreLAB mslab = this.mslabs.get(0); +057return mslab.getNewExternalChunk(); +058 } +059 +060 /* Creating a chunk to be used as a data chunk in CellChunkMap. +061 ** This chunk is bigger than the normal constant chunk size, and is thus called a JumboChunk; it is used for +062 ** jumbo cells (whose size is bigger than normal chunks). +063 ** Jumbo Chunks are needed only for CCM and thus are created only in +064 ** CompactingMemStore.IndexType.CHUNK_MAP type. +065 ** Returning a new chunk, without replacing the current chunk, +066 ** meaning MSLABImpl does not make the returned chunk the CurChunk. +067 ** The space on this chunk will be allocated externally. +068 ** The interface is only for external callers. +069 */ +070 @Override +071 public Chunk getNewExternalJumboChunk(int size) { +072MemStoreLAB mslab = this.mslabs.get(0); +073return mslab.getNewExternalJumboChunk(size); +074 } +075 +076 @Override +077 public void close() { -078// 'openScannerCount' here tracks the scanners opened on segments which directly ref
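The close() comment above describes a reference-counted, deferred close: close() only marks the LAB as closed, and whichever scanner finishes last performs the actual close, which cascades down to the leaf MSLABs that return their chunks to the pool. A minimal standalone sketch of that pattern follows; RefCountedLAB and its delegate list are illustrative stand-ins, not the HBase types.

import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;

// Sketch of the deferred-close pattern: close() marks the LAB closed, and the
// last scanner to finish (count reaching 0) triggers the real close.
class RefCountedLAB {
  private final List<RefCountedLAB> delegates; // leaf LABs; empty at the leaves
  private final AtomicInteger openScannerCount = new AtomicInteger();
  private volatile boolean closed;

  RefCountedLAB(List<RefCountedLAB> delegates) {
    this.delegates = delegates;
  }

  void incScannerCount() {
    openScannerCount.incrementAndGet();
  }

  void decScannerCount() {
    int count = openScannerCount.decrementAndGet();
    if (closed) {
      checkAndClose(count); // the last scanner out performs the deferred close
    }
  }

  void close() {
    closed = true; // mark for closure first, so late finishers see it
    checkAndClose(openScannerCount.get()); // close now only if no scanners remain
  }

  private void checkAndClose(int openScanners) {
    if (openScanners == 0) {
      for (RefCountedLAB lab : delegates) {
        lab.close(); // a delegate with live scanners just marks itself closed
      }
    }
  }
}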
[36/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/35decbe4/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html index b4165bf..dec7409 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html @@ -1560,1985 +1560,1986 @@ 1552 * @return Client info for use as prefix on an audit log string; who did an action 1553 */ 1554 public String getClientIdAuditPrefix() { -1555return "Client=" + RpcServer.getRequestUserName() + "/" + RpcServer.getRemoteAddress(); -1556 } -1557 -1558 /** -1559 * Switch for the background CatalogJanitor thread. -1560 * Used for testing. The thread will continue to run. It will just be a noop -1561 * if disabled. -1562 * @param b If false, the catalog janitor won't do anything. -1563 */ -1564 public void setCatalogJanitorEnabled(final boolean b) { -1565 this.catalogJanitorChore.setEnabled(b); -1566 } -1567 -1568 @Override -1569 public long mergeRegions( -1570 final RegionInfo[] regionsToMerge, -1571 final boolean forcible, -1572 final long nonceGroup, -1573 final long nonce) throws IOException { -1574checkInitialized(); -1575 -1576assert(regionsToMerge.length == 2); -1577 -1578TableName tableName = regionsToMerge[0].getTable(); -1579if (tableName == null || regionsToMerge[1].getTable() == null) { -1580 throw new UnknownRegionException ("Can't merge regions without table associated"); -1581} -1582 -1583if (!tableName.equals(regionsToMerge[1].getTable())) { -1584 throw new IOException ( -1585"Cannot merge regions from two different tables " + regionsToMerge[0].getTable() -1586+ " and " + regionsToMerge[1].getTable()); -1587} -1588 -1589if (RegionInfo.COMPARATOR.compare(regionsToMerge[0], regionsToMerge[1]) == 0) { -1590 throw new MergeRegionException( -1591"Cannot merge a region to itself " + regionsToMerge[0] + ", " + regionsToMerge[1]); -1592} -1593 -1594return MasterProcedureUtil.submitProcedure( -1595new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) { -1596 @Override -1597 protected void run() throws IOException { -1598 getMaster().getMasterCoprocessorHost().preMergeRegions(regionsToMerge); -1599 -1600 LOG.info(getClientIdAuditPrefix() + " Merge regions " + -1601 regionsToMerge[0].getEncodedName() + " and " + regionsToMerge[1].getEncodedName()); -1602 -1603submitProcedure(new MergeTableRegionsProcedure(procedureExecutor.getEnvironment(), -1604 regionsToMerge, forcible)); -1605 -1606 getMaster().getMasterCoprocessorHost().postMergeRegions(regionsToMerge); -1607 } -1608 -1609 @Override -1610 protected String getDescription() { -1611return "MergeTableProcedure"; -1612 } -1613}); -1614 } -1615 -1616 @Override -1617 public long splitRegion(final RegionInfo regionInfo, final byte[] splitRow, -1618 final long nonceGroup, final long nonce) -1619 throws IOException { -1620checkInitialized(); -1621return MasterProcedureUtil.submitProcedure( -1622new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) { -1623 @Override -1624 protected void run() throws IOException { -1625 getMaster().getMasterCoprocessorHost().preSplitRegion(regionInfo.getTable(), splitRow); -1626 LOG.info(getClientIdAuditPrefix() + " split " + regionInfo.getRegionNameAsString()); -1627 -1628// Execute the operation 
asynchronously -1629 submitProcedure(getAssignmentManager().createSplitProcedure(regionInfo, splitRow)); -1630 } -1631 -1632 @Override -1633 protected String getDescription() { -1634return "SplitTableProcedure"; -1635 } -1636}); -1637 } -1638 -1639 // Public so can be accessed by tests. Blocks until move is done. -1640 // Replace with an async implementation from which you can get -1641 // a success/failure result. -1642 @VisibleForTesting -1643 public void move(final byte[] encodedRegionName, byte[] destServerName) throws HBaseIOException { -1644RegionState regionState = assignmentManager.getRegionStates(). -1645 getRegionState(Bytes.toString(encodedRegionName)); -1646 -1647RegionInfo hri; -1648if (regionState != null) { -1649 hri = regionState.getRegion(); -1650} else { -1651 throw new UnknownRegionException(Bytes.toStringBinary(encodedRegionName)); -1652} -1653 -1654ServerName dest; -1655Listexclude = hri.getTable().isSystemT
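mergeRegions above accepts exactly two regions, rejects a merge across different tables or of a region with itself, and then submits a MergeTableRegionsProcedure under a nonce. A client usually reaches this path through the Admin API; a hedged sketch, assuming Admin.mergeRegionsAsync accepts encoded region names (the names and cluster settings below are placeholders):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class MergeRegionsExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Placeholder encoded region names; both must belong to the same table
      // and be distinct, or the master-side checks above reject the merge.
      byte[] regionA = Bytes.toBytes("placeholderEncodedRegionNameA");
      byte[] regionB = Bytes.toBytes("placeholderEncodedRegionNameB");
      admin.mergeRegionsAsync(regionA, regionB, false /* forcible */).get();
    }
  }
}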
[38/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/35decbe4/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/IncrementalTableBackupClient.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/IncrementalTableBackupClient.html b/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/IncrementalTableBackupClient.html index de32a91..805df1a 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/IncrementalTableBackupClient.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/IncrementalTableBackupClient.html @@ -26,378 +26,390 @@ 018 019package org.apache.hadoop.hbase.backup.impl; 020 -021import java.io.IOException; -022import java.net.URI; -023import java.net.URISyntaxException; -024import java.util.ArrayList; -025import java.util.HashMap; -026import java.util.List; -027import java.util.Map; -028import java.util.Set; -029import java.util.TreeMap; -030 -031import org.apache.commons.lang3.StringUtils; -032import org.apache.commons.logging.Log; -033import org.apache.commons.logging.LogFactory; -034import org.apache.hadoop.fs.FileSystem; -035import org.apache.hadoop.fs.Path; -036import org.apache.hadoop.hbase.TableName; -037import org.apache.hadoop.hbase.backup.BackupCopyJob; -038import org.apache.hadoop.hbase.backup.BackupInfo.BackupPhase; -039import org.apache.hadoop.hbase.backup.BackupRequest; -040import org.apache.hadoop.hbase.backup.BackupRestoreFactory; -041import org.apache.hadoop.hbase.backup.BackupType; -042import org.apache.hadoop.hbase.backup.mapreduce.MapReduceBackupCopyJob; -043import org.apache.hadoop.hbase.backup.util.BackupUtils; -044import org.apache.hadoop.hbase.client.Admin; -045import org.apache.hadoop.hbase.client.Connection; -046import org.apache.hadoop.hbase.mapreduce.WALPlayer; -047import org.apache.hadoop.hbase.util.Bytes; -048import org.apache.hadoop.hbase.util.FSUtils; -049import org.apache.hadoop.hbase.util.HFileArchiveUtil; -050import org.apache.hadoop.hbase.util.Pair; -051import org.apache.hadoop.hbase.wal.AbstractFSWALProvider; -052import org.apache.hadoop.util.Tool; -053import org.apache.yetus.audience.InterfaceAudience; -054 -055/** -056 * Incremental backup implementation. -057 * See the {@link #execute() execute} method. 
-058 * -059 */ -060@InterfaceAudience.Private -061public class IncrementalTableBackupClient extends TableBackupClient { -062 private static final Log LOG = LogFactory.getLog(IncrementalTableBackupClient.class); -063 -064 protected IncrementalTableBackupClient() { -065 } -066 -067 public IncrementalTableBackupClient(final Connection conn, final String backupId, -068 BackupRequest request) throws IOException { -069super(conn, backupId, request); -070 } -071 -072 protected List<String> filterMissingFiles(List<String> incrBackupFileList) throws IOException { -073FileSystem fs = FileSystem.get(conf); -074List<String> list = new ArrayList<>(); -075for (String file : incrBackupFileList) { -076 Path p = new Path(file); -077 if (fs.exists(p) || isActiveWalPath(p)) { -078list.add(file); -079 } else { -080LOG.warn("Can't find file: " + file); -081 } -082} -083return list; -084 } -085 -086 /** -087 * Check if a given path belongs to the active WAL directory -088 * @param p path -089 * @return true, if yes -090 */ -091 protected boolean isActiveWalPath(Path p) { -092return !AbstractFSWALProvider.isArchivedLogFile(p); -093 } -094 -095 protected static int getIndex(TableName tbl, List<TableName> sTableList) { -096if (sTableList == null) return 0; -097for (int i = 0; i < sTableList.size(); i++) { -098 if (tbl.equals(sTableList.get(i))) { -099return i; -100 } -101} -102return -1; -103 } -104 -105 /* -106 * Reads bulk load records from the backup table, iterates through the records and forms the paths -107 * for bulk loaded hfiles. Copies the bulk loaded hfiles to the backup destination. -108 * @param sTableList list of tables to be backed up -109 * @return map of table to List of files -110 */ -111 protected Map<byte[], List<Path>>[] handleBulkLoad(List<TableName> sTableList) throws IOException { -112Map<byte[], List<Path>>[] mapForSrc = new Map[sTableList.size()]; -113List<String> activeFiles = new ArrayList<>(); -114List<String> archiveFiles = new ArrayList<>(); -115Pair
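filterMissingFiles above keeps a WAL entry when the file still exists on the filesystem, or when isActiveWalPath reports it has not yet been moved to the archive directory. A condensed, self-contained sketch of that filter, with a predicate parameter standing in for the AbstractFSWALProvider.isArchivedLogFile check:

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.function.Predicate;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

class WalFilterSketch {
  // Keep a WAL if it still exists, or if its path says it is still "active"
  // (not yet archived); otherwise drop it with a warning, as above.
  static List<String> filterMissing(Configuration conf, List<String> wals,
      Predicate<Path> isActive) throws IOException {
    FileSystem fs = FileSystem.get(conf);
    List<String> kept = new ArrayList<>();
    for (String file : wals) {
      Path p = new Path(file);
      if (fs.exists(p) || isActive.test(p)) {
        kept.add(file);
      }
    }
    return kept;
  }
}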
[02/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/35decbe4/testdevapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/TestReplicator.FailureInjectingReplicationEndpointForTest.html -- diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/TestReplicator.FailureInjectingReplicationEndpointForTest.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/TestReplicator.FailureInjectingReplicationEndpointForTest.html index c97fa47..3b16d00 100644 --- a/testdevapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/TestReplicator.FailureInjectingReplicationEndpointForTest.html +++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/TestReplicator.FailureInjectingReplicationEndpointForTest.html @@ -56,379 +56,381 @@ 048import org.junit.AfterClass; 049import org.junit.BeforeClass; 050import org.junit.Test; -051import org.junit.experimental.categories.Category; -052 +051import org.junit.Ignore; +052import org.junit.experimental.categories.Category; 053 -054@Category(MediumTests.class) -055public class TestReplicator extends TestReplicationBase { -056 -057 static final Log LOG = LogFactory.getLog(TestReplicator.class); -058 static final int NUM_ROWS = 10; -059 -060 @BeforeClass -061 public static void setUpBeforeClass() throws Exception { -062// Set RPC size limit to 10kb (will be applied to both source and sink clusters) -063 conf1.setInt(RpcServer.MAX_REQUEST_SIZE, 1024 * 10); -064 TestReplicationBase.setUpBeforeClass(); -065admin.removePeer("2"); // Remove the peer set up for us by base class -066 } -067 -068 @Test -069 public void testReplicatorBatching() throws Exception { -070// Clear the tables -071truncateTable(utility1, tableName); -072truncateTable(utility2, tableName); -073 -074// Replace the peer set up for us by the base class with a wrapper for this test -075 admin.addPeer("testReplicatorBatching", -076 new ReplicationPeerConfig().setClusterKey(utility2.getClusterKey()) -077 .setReplicationEndpointImpl(ReplicationEndpointForTest.class.getName()), null); -078 -079 ReplicationEndpointForTest.setBatchCount(0); -080 ReplicationEndpointForTest.setEntriesCount(0); -081try { -082 ReplicationEndpointForTest.pause(); -083 try { -084// Queue up a bunch of cells of size 8K. Because of RPC size limits, they will all -085// have to be replicated separately. -086final byte[] valueBytes = new byte[8 *1024]; -087for (int i = 0; i < NUM_ROWS; i++) { -088 htable1.put(new Put(("row"+Integer.toString(i)).getBytes()) -089.addColumn(famName, null, valueBytes) -090 ); -091} -092 } finally { -093 ReplicationEndpointForTest.resume(); -094 } -095 -096 // Wait for replication to complete. 
-097 Waiter.waitFor(conf1, 6, new Waiter.ExplainingPredicate() { -098@Override -099public boolean evaluate() throws Exception { -100 return ReplicationEndpointForTest.getBatchCount() >= NUM_ROWS; -101} -102 -103@Override -104public String explainFailure() throws Exception { -105 return "We waited too long for expected replication of " + NUM_ROWS + " entries"; -106} -107 }); -108 -109 assertEquals("We sent an incorrect number of batches", NUM_ROWS, -110 ReplicationEndpointForTest.getBatchCount()); -111 assertEquals("We did not replicate enough rows", NUM_ROWS, -112utility2.countRows(htable2)); -113} finally { -114 admin.removePeer("testReplicatorBatching"); -115} -116 } -117 -118 @Test -119 public void testReplicatorWithErrors() throws Exception { -120// Clear the tables -121truncateTable(utility1, tableName); -122truncateTable(utility2, tableName); -123 -124// Replace the peer set up for us by the base class with a wrapper for this test -125 admin.addPeer("testReplicatorWithErrors", -126 new ReplicationPeerConfig().setClusterKey(utility2.getClusterKey()) -127 .setReplicationEndpointImpl(FailureInjectingReplicationEndpointForTest.class.getName()), -128null); -129 -130 FailureInjectingReplicationEndpointForTest.setBatchCount(0); -131 FailureInjectingReplicationEndpointForTest.setEntriesCount(0); -132try { -133 FailureInjectingReplicationEndpointForTest.pause(); -134 try { -135// Queue up a bunch of cells of size 8K. Because of RPC size limits, they will all -136// have to be replicated separately. -137final byte[] valueBytes = new byte[8 *1024]; -138for (int i = 0; i < NUM_ROWS; i++) { -139 htable1.put(new Put(("row"+Integer.toString(i)).getBytes()) -140.addColumn(famName, null, valueBytes) -141 ); -142
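The wait in testReplicatorBatching uses Waiter.waitFor with an ExplainingPredicate, so a timeout produces a message describing what was being waited for rather than a bare assertion failure. The shape of that pattern, sketched outside the replication setup; the 60-second timeout and the currentBatchCount() stand-in are illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Waiter;

class WaitForBatchesSketch {
  // Poll until the condition holds or the (illustrative) 60s timeout elapses;
  // on timeout, explainFailure() supplies the diagnostic text.
  static void waitForBatches(Configuration conf, final int expected) {
    Waiter.waitFor(conf, 60000, new Waiter.ExplainingPredicate<Exception>() {
      @Override
      public boolean evaluate() throws Exception {
        return currentBatchCount() >= expected;
      }

      @Override
      public String explainFailure() throws Exception {
        return "Timed out waiting for " + expected + " replicated batches";
      }
    });
  }

  private static int currentBatchCount() {
    return 0; // stand-in for the test's shared counter
  }
}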
[51/51] [partial] hbase-site git commit: Published site at .
Published site at . Project: http://git-wip-us.apache.org/repos/asf/hbase-site/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase-site/commit/35decbe4 Tree: http://git-wip-us.apache.org/repos/asf/hbase-site/tree/35decbe4 Diff: http://git-wip-us.apache.org/repos/asf/hbase-site/diff/35decbe4 Branch: refs/heads/asf-site Commit: 35decbe40c66428deefc9b91e3ba5e4503940a81 Parents: 26b2bd6 Author: jenkins Authored: Tue Oct 31 15:16:30 2017 + Committer: jenkins Committed: Tue Oct 31 15:16:30 2017 + -- acid-semantics.html | 4 +- apache_hbase_reference_guide.pdf| 4 +- .../hadoop/hbase/snapshot/ExportSnapshot.html |12 +- .../hadoop/hbase/snapshot/ExportSnapshot.html | 2000 +- book.html | 2 +- bulk-loads.html | 4 +- checkstyle-aggregate.html | 19362 - checkstyle.rss | 8 +- coc.html| 4 +- cygwin.html | 4 +- dependencies.html | 4 +- dependency-convergence.html | 4 +- dependency-info.html| 4 +- dependency-management.html | 4 +- devapidocs/constant-values.html |90 +- devapidocs/index-all.html |29 +- .../BackupRestoreConstants.BackupCommand.html |44 +- .../hbase/backup/BackupRestoreConstants.html|85 +- .../backup/impl/FullTableBackupClient.html |14 +- .../impl/IncrementalTableBackupClient.html |34 +- .../hbase/backup/impl/RestoreTablesClient.html |32 +- .../hadoop/hbase/backup/package-tree.html | 6 +- .../hadoop/hbase/client/package-tree.html |26 +- .../hadoop/hbase/executor/package-tree.html | 2 +- .../hadoop/hbase/filter/package-tree.html | 6 +- .../hadoop/hbase/io/hfile/package-tree.html | 6 +- .../hadoop/hbase/mapreduce/package-tree.html| 2 +- .../org/apache/hadoop/hbase/master/HMaster.html | 232 +- .../hadoop/hbase/master/package-tree.html | 4 +- .../org/apache/hadoop/hbase/package-tree.html |14 +- .../hadoop/hbase/procedure2/package-tree.html | 6 +- .../hadoop/hbase/quotas/package-tree.html | 6 +- .../apache/hadoop/hbase/regionserver/Chunk.html |31 +- ...ator.MemStoreChunkPool.StatisticsThread.html | 8 +- .../ChunkCreator.MemStoreChunkPool.html |26 +- .../hadoop/hbase/regionserver/ChunkCreator.html | 137 +- .../regionserver/ImmutableMemStoreLAB.html |37 +- .../hadoop/hbase/regionserver/MemStoreLAB.html |32 +- .../hbase/regionserver/MemStoreLABImpl.html |43 +- .../hadoop/hbase/regionserver/OffheapChunk.html | 2 +- .../hadoop/hbase/regionserver/OnheapChunk.html | 2 +- .../hbase/regionserver/RSRpcServices.html | 136 +- .../hbase/regionserver/class-use/Chunk.html |46 +- .../class-use/CompactingMemStore.IndexType.html |19 +- .../hadoop/hbase/regionserver/package-tree.html |14 +- .../replication/regionserver/package-tree.html | 2 +- .../hadoop/hbase/rest/model/package-tree.html | 2 +- .../hbase/rsgroup/RSGroupBasedLoadBalancer.html |64 +- .../hbase/security/access/package-tree.html | 4 +- .../hadoop/hbase/security/package-tree.html | 2 +- .../hbase/snapshot/ExportSnapshot.Counter.html |20 +- .../snapshot/ExportSnapshot.ExportMapper.html |64 +- ...hotInputFormat.ExportSnapshotInputSplit.html |20 +- ...tInputFormat.ExportSnapshotRecordReader.html |24 +- ...xportSnapshot.ExportSnapshotInputFormat.html | 8 +- .../hbase/snapshot/ExportSnapshot.Options.html |28 +- .../hbase/snapshot/ExportSnapshot.Testing.html |12 +- .../hadoop/hbase/snapshot/ExportSnapshot.html | 103 +- .../hadoop/hbase/thrift/package-tree.html | 4 +- .../apache/hadoop/hbase/util/package-tree.html |10 +- .../org/apache/hadoop/hbase/Version.html| 6 +- .../BackupRestoreConstants.BackupCommand.html |65 +- .../hbase/backup/BackupRestoreConstants.html|65 +- .../backup/impl/FullTableBackupClient.html | 404 +- 
.../impl/IncrementalTableBackupClient.html | 742 +- .../hbase/backup/impl/RestoreTablesClient.html | 519 +- .../master/HMaster.InitializationMonitor.html | 3959 ++-- .../hbase/master/HMaster.RedirectServlet.html | 3959 ++-- .../org/apache/hadoop/hbase/master/HMaster.html | 3959 ++-- .../apache/hadoop/hbase/regionserver/Chunk.html | 176 +- ...ator.MemStoreChunkPool.StatisticsThread.html | 679 +- .../ChunkCreator.MemStoreChunkPool.html | 679 +- .../hadoop/hbase/regionserver/ChunkCreator.html | 679 +- .../region
[14/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/35decbe4/devapidocs/src-html/org/apache/hadoop/hbase/snapshot/ExportSnapshot.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/snapshot/ExportSnapshot.html b/devapidocs/src-html/org/apache/hadoop/hbase/snapshot/ExportSnapshot.html index 87257da..add30e1 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/snapshot/ExportSnapshot.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/snapshot/ExportSnapshot.html @@ -116,1006 +116,1008 @@ 108 private static final String CONF_BUFFER_SIZE = "snapshot.export.buffer.size"; 109 private static final String CONF_MAP_GROUP = "snapshot.export.default.map.group"; 110 private static final String CONF_BANDWIDTH_MB = "snapshot.export.map.bandwidth.mb"; -111 protected static final String CONF_SKIP_TMP = "snapshot.export.skip.tmp"; -112 -113 static class Testing { -114static final String CONF_TEST_FAILURE = "test.snapshot.export.failure"; -115static final String CONF_TEST_FAILURE_COUNT = "test.snapshot.export.failure.count"; -116int failuresCountToInject = 0; -117int injectedFailureCount = 0; -118 } -119 -120 // Command line options and defaults. -121 static final class Options { -122static final Option SNAPSHOT = new Option(null, "snapshot", true, "Snapshot to restore."); -123static final Option TARGET_NAME = new Option(null, "target", true, -124"Target name for the snapshot."); -125static final Option COPY_TO = new Option(null, "copy-to", true, "Remote " -126+ "destination hdfs://"); -127static final Option COPY_FROM = new Option(null, "copy-from", true, -128"Input folder hdfs:// (default hbase.rootdir)"); -129static final Option NO_CHECKSUM_VERIFY = new Option(null, "no-checksum-verify", false, -130"Do not verify checksum, use name+length only."); -131static final Option NO_TARGET_VERIFY = new Option(null, "no-target-verify", false, -132"Do not verify the integrity of the exported snapshot."); -133static final Option OVERWRITE = new Option(null, "overwrite", false, -134"Rewrite the snapshot manifest if already exists."); -135static final Option CHUSER = new Option(null, "chuser", true, -136"Change the owner of the files to the specified one."); -137static final Option CHGROUP = new Option(null, "chgroup", true, -138"Change the group of the files to the specified one."); -139static final Option CHMOD = new Option(null, "chmod", true, -140"Change the permission of the files to the specified one."); -141static final Option MAPPERS = new Option(null, "mappers", true, -142"Number of mappers to use during the copy (mapreduce.job.maps)."); -143static final Option BANDWIDTH = new Option(null, "bandwidth", true, -144"Limit bandwidth to this value in MB/second."); -145 } -146 -147 // Export Map-Reduce Counters, to keep track of the progress -148 public enum Counter { -149MISSING_FILES, FILES_COPIED, FILES_SKIPPED, COPY_FAILED, -150BYTES_EXPECTED, BYTES_SKIPPED, BYTES_COPIED -151 } -152 -153 private static class ExportMapper extends Mapper{ -155private static final Log LOG = LogFactory.getLog(ExportMapper.class); -156final static int REPORT_SIZE = 1 * 1024 * 1024; -157final static int BUFFER_SIZE = 64 * 1024; -158 -159private boolean verifyChecksum; -160private String filesGroup; -161private String filesUser; -162private short filesMode; -163private int bufferSize; -164 -165private FileSystem outputFs; -166private Path outputArchive; -167private Path outputRoot; -168 -169private FileSystem inputFs; -170private Path inputArchive; -171private Path inputRoot; -172 -173private static Testing 
testing = new Testing(); -174 -175@Override -176public void setup(Context context) throws IOException { -177 Configuration conf = context.getConfiguration(); -178 -179 Configuration srcConf = HBaseConfiguration.createClusterConf(conf, null, CONF_SOURCE_PREFIX); -180 Configuration destConf = HBaseConfiguration.createClusterConf(conf, null, CONF_DEST_PREFIX); -181 -182 verifyChecksum = conf.getBoolean(CONF_CHECKSUM_VERIFY, true); -183 -184 filesGroup = conf.get(CONF_FILES_GROUP); -185 filesUser = conf.get(CONF_FILES_USER); -186 filesMode = (short)conf.getInt(CONF_FILES_MODE, 0); -187 outputRoot = new Path(conf.get(CONF_OUTPUT_ROOT)); -188 inputRoot = new Path(conf.get(CONF_INPUT_ROOT)); -189 -190 inputArchive = new Path(inputRoot, HConstants.HFILE_ARCHIVE_DIRECTORY); -191 outputArchive = new Path(outputRoot, HConstants.HFILE_ARCHIVE_DIRECTORY); -192 -193 try { -194srcConf.setBoolean("fs." + i
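The Options block above defines ExportSnapshot's command-line flags. A hedged invocation sketch, assuming the tool can be driven through Hadoop's ToolRunner as its Tool-style structure suggests; the snapshot name, destination URI, and flag values are placeholders:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Flags mirror the Options above; values are placeholders.
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "my_snapshot",
        "-copy-to", "hdfs://backup-cluster:8020/hbase",
        "-mappers", "4",
        "-bandwidth", "100" // MB/second limit, per the BANDWIDTH option
    });
    System.exit(rc);
  }
}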
[20/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/35decbe4/devapidocs/src-html/org/apache/hadoop/hbase/snapshot/ExportSnapshot.ExportMapper.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/snapshot/ExportSnapshot.ExportMapper.html b/devapidocs/src-html/org/apache/hadoop/hbase/snapshot/ExportSnapshot.ExportMapper.html index 87257da..add30e1 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/snapshot/ExportSnapshot.ExportMapper.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/snapshot/ExportSnapshot.ExportMapper.html @@ -116,1006 +116,1008 @@ 108 private static final String CONF_BUFFER_SIZE = "snapshot.export.buffer.size"; 109 private static final String CONF_MAP_GROUP = "snapshot.export.default.map.group"; 110 private static final String CONF_BANDWIDTH_MB = "snapshot.export.map.bandwidth.mb"; -111 protected static final String CONF_SKIP_TMP = "snapshot.export.skip.tmp"; -112 -113 static class Testing { -114static final String CONF_TEST_FAILURE = "test.snapshot.export.failure"; -115static final String CONF_TEST_FAILURE_COUNT = "test.snapshot.export.failure.count"; -116int failuresCountToInject = 0; -117int injectedFailureCount = 0; -118 } -119 -120 // Command line options and defaults. -121 static final class Options { -122static final Option SNAPSHOT = new Option(null, "snapshot", true, "Snapshot to restore."); -123static final Option TARGET_NAME = new Option(null, "target", true, -124"Target name for the snapshot."); -125static final Option COPY_TO = new Option(null, "copy-to", true, "Remote " -126+ "destination hdfs://"); -127static final Option COPY_FROM = new Option(null, "copy-from", true, -128"Input folder hdfs:// (default hbase.rootdir)"); -129static final Option NO_CHECKSUM_VERIFY = new Option(null, "no-checksum-verify", false, -130"Do not verify checksum, use name+length only."); -131static final Option NO_TARGET_VERIFY = new Option(null, "no-target-verify", false, -132"Do not verify the integrity of the exported snapshot."); -133static final Option OVERWRITE = new Option(null, "overwrite", false, -134"Rewrite the snapshot manifest if already exists."); -135static final Option CHUSER = new Option(null, "chuser", true, -136"Change the owner of the files to the specified one."); -137static final Option CHGROUP = new Option(null, "chgroup", true, -138"Change the group of the files to the specified one."); -139static final Option CHMOD = new Option(null, "chmod", true, -140"Change the permission of the files to the specified one."); -141static final Option MAPPERS = new Option(null, "mappers", true, -142"Number of mappers to use during the copy (mapreduce.job.maps)."); -143static final Option BANDWIDTH = new Option(null, "bandwidth", true, -144"Limit bandwidth to this value in MB/second."); -145 } -146 -147 // Export Map-Reduce Counters, to keep track of the progress -148 public enum Counter { -149MISSING_FILES, FILES_COPIED, FILES_SKIPPED, COPY_FAILED, -150BYTES_EXPECTED, BYTES_SKIPPED, BYTES_COPIED -151 } -152 -153 private static class ExportMapper extends Mapper{ -155private static final Log LOG = LogFactory.getLog(ExportMapper.class); -156final static int REPORT_SIZE = 1 * 1024 * 1024; -157final static int BUFFER_SIZE = 64 * 1024; -158 -159private boolean verifyChecksum; -160private String filesGroup; -161private String filesUser; -162private short filesMode; -163private int bufferSize; -164 -165private FileSystem outputFs; -166private Path outputArchive; -167private Path outputRoot; -168 -169private FileSystem inputFs; -170private Path 
inputArchive; -171private Path inputRoot; -172 -173private static Testing testing = new Testing(); -174 -175@Override -176public void setup(Context context) throws IOException { -177 Configuration conf = context.getConfiguration(); -178 -179 Configuration srcConf = HBaseConfiguration.createClusterConf(conf, null, CONF_SOURCE_PREFIX); -180 Configuration destConf = HBaseConfiguration.createClusterConf(conf, null, CONF_DEST_PREFIX); -181 -182 verifyChecksum = conf.getBoolean(CONF_CHECKSUM_VERIFY, true); -183 -184 filesGroup = conf.get(CONF_FILES_GROUP); -185 filesUser = conf.get(CONF_FILES_USER); -186 filesMode = (short)conf.getInt(CONF_FILES_MODE, 0); -187 outputRoot = new Path(conf.get(CONF_OUTPUT_ROOT)); -188 inputRoot = new Path(conf.get(CONF_INPUT_ROOT)); -189 -190 inputArchive = new Path(inputRoot, HConstants.HFILE_ARCHIVE_DIRECTORY); -191 outputArchive = new Path(outputRoot, HConstants.HFILE_ARCHIVE_DIRECT
[29/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/35decbe4/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.LogDelegate.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.LogDelegate.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.LogDelegate.html index 9761535..a4d2378 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.LogDelegate.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.LogDelegate.html @@ -1145,2347 +1145,2348 @@ 1137 if (LOG.isWarnEnabled()) { 1138LOG.warn("Large batch operation detected (greater than " + rowSizeWarnThreshold 1139+ ") (HBASE-18023)." + " Requested Number of Rows: " + sum + " Client: " -1140+ RpcServer.getRequestUserName() + "/" + RpcServer.getRemoteAddress() -1141+ " first region in multi=" + firstRegionName); -1142 } -1143} -1144 }; -1145 -1146 private final LogDelegate ld; -1147 -1148 public RSRpcServices(HRegionServer rs) throws IOException { -1149this(rs, DEFAULT_LOG_DELEGATE); -1150 } -1151 -1152 // Directly invoked only for testing -1153 RSRpcServices(HRegionServer rs, LogDelegate ld) throws IOException { -1154this.ld = ld; -1155regionServer = rs; -1156rowSizeWarnThreshold = rs.conf.getInt(BATCH_ROWS_THRESHOLD_NAME, BATCH_ROWS_THRESHOLD_DEFAULT); -1157RpcSchedulerFactory rpcSchedulerFactory; -1158try { -1159 Class> rpcSchedulerFactoryClass = rs.conf.getClass( -1160 REGION_SERVER_RPC_SCHEDULER_FACTORY_CLASS, -1161 SimpleRpcSchedulerFactory.class); -1162 rpcSchedulerFactory = ((RpcSchedulerFactory) rpcSchedulerFactoryClass.newInstance()); -1163} catch (InstantiationException e) { -1164 throw new IllegalArgumentException(e); -1165} catch (IllegalAccessException e) { -1166 throw new IllegalArgumentException(e); -1167} -1168// Server to handle client requests. -1169InetSocketAddress initialIsa; -1170InetSocketAddress bindAddress; -1171if(this instanceof MasterRpcServices) { -1172 String hostname = getHostname(rs.conf, true); -1173 int port = rs.conf.getInt(HConstants.MASTER_PORT, HConstants.DEFAULT_MASTER_PORT); -1174 // Creation of a HSA will force a resolve. -1175 initialIsa = new InetSocketAddress(hostname, port); -1176 bindAddress = new InetSocketAddress(rs.conf.get("hbase.master.ipc.address", hostname), port); -1177} else { -1178 String hostname = getHostname(rs.conf, false); -1179 int port = rs.conf.getInt(HConstants.REGIONSERVER_PORT, -1180 HConstants.DEFAULT_REGIONSERVER_PORT); -1181 // Creation of a HSA will force a resolve. -1182 initialIsa = new InetSocketAddress(hostname, port); -1183 bindAddress = new InetSocketAddress( -1184 rs.conf.get("hbase.regionserver.ipc.address", hostname), port); -1185} -1186if (initialIsa.getAddress() == null) { -1187 throw new IllegalArgumentException("Failed resolve of " + initialIsa); -1188} -1189priority = createPriority(); -1190String name = rs.getProcessName() + "/" + initialIsa.toString(); -1191// Set how many times to retry talking to another server over Connection. -1192 ConnectionUtils.setServerSideHConnectionRetriesConfig(rs.conf, name, LOG); -1193try { -1194 rpcServer = RpcServerFactory.createRpcServer(rs, name, getServices(), -1195 bindAddress, // use final bindAddress for this server. -1196 rs.conf, -1197 rpcSchedulerFactory.create(rs.conf, this, rs)); -1198 rpcServer.setRsRpcServices(this); -1199} catch (BindException be) { -1200 String configName = (this instanceof MasterRpcServices) ? 
HConstants.MASTER_PORT : -1201 HConstants.REGIONSERVER_PORT; -1202 throw new IOException(be.getMessage() + ". To switch ports use the '" + configName + -1203 "' configuration property.", be.getCause() != null ? be.getCause() : be); -1204} -1205 -1206scannerLeaseTimeoutPeriod = rs.conf.getInt( -1207 HConstants.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD, -1208 HConstants.DEFAULT_HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD); -1209maxScannerResultSize = rs.conf.getLong( -1210 HConstants.HBASE_SERVER_SCANNER_MAX_RESULT_SIZE_KEY, -1211 HConstants.DEFAULT_HBASE_SERVER_SCANNER_MAX_RESULT_SIZE); -1212rpcTimeout = rs.conf.getInt( -1213 HConstants.HBASE_RPC_TIMEOUT_KEY, -1214 HConstants.DEFAULT_HBASE_RPC_TIMEOUT); -1215minimumScanTimeLimitDelta = rs.conf.getLong( -1216 REGION_SERVER_RPC_MINIMUM_SCAN_TIME_LIMIT_DELTA, -1217 DEFAULT_REGION_SERVER_RPC_MINIMUM_SCAN_TIME_LIMIT_DELTA); -1218 -1219InetSocketAddress address = rpcServer.getListenerAddress(); -1220if (address == null) { -1221
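The constructor above loads its RpcSchedulerFactory reflectively from the REGION_SERVER_RPC_SCHEDULER_FACTORY_CLASS setting, defaulting to SimpleRpcSchedulerFactory, and converts reflection failures into IllegalArgumentException. A hedged configuration sketch; the literal key string and the MyRpcSchedulerFactory class are assumptions for illustration:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class SchedulerFactoryConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Key assumed to match REGION_SERVER_RPC_SCHEDULER_FACTORY_CLASS;
    // the class must implement RpcSchedulerFactory and have a no-arg
    // constructor, since it is created via newInstance() above.
    conf.set("hbase.region.server.rpc.scheduler.factory.class",
        "com.example.MyRpcSchedulerFactory");
    // A bad class name surfaces at region server startup as an
    // IllegalArgumentException wrapping the reflection failure.
  }
}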
hbase-site git commit: INFRA-10751 Empty commit
Repository: hbase-site Updated Branches: refs/heads/asf-site 35decbe40 -> 80201fd96 INFRA-10751 Empty commit Project: http://git-wip-us.apache.org/repos/asf/hbase-site/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase-site/commit/80201fd9 Tree: http://git-wip-us.apache.org/repos/asf/hbase-site/tree/80201fd9 Diff: http://git-wip-us.apache.org/repos/asf/hbase-site/diff/80201fd9 Branch: refs/heads/asf-site Commit: 80201fd96b156e2de21485a0273eff4a557318e4 Parents: 35decbe Author: jenkins Authored: Tue Oct 31 15:17:10 2017 + Committer: jenkins Committed: Tue Oct 31 15:17:10 2017 + -- --
[21/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/35decbe4/devapidocs/src-html/org/apache/hadoop/hbase/snapshot/ExportSnapshot.Counter.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/snapshot/ExportSnapshot.Counter.html b/devapidocs/src-html/org/apache/hadoop/hbase/snapshot/ExportSnapshot.Counter.html index 87257da..add30e1 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/snapshot/ExportSnapshot.Counter.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/snapshot/ExportSnapshot.Counter.html @@ -116,1006 +116,1008 @@ 108 private static final String CONF_BUFFER_SIZE = "snapshot.export.buffer.size"; 109 private static final String CONF_MAP_GROUP = "snapshot.export.default.map.group"; 110 private static final String CONF_BANDWIDTH_MB = "snapshot.export.map.bandwidth.mb"; -111 protected static final String CONF_SKIP_TMP = "snapshot.export.skip.tmp"; -112 -113 static class Testing { -114static final String CONF_TEST_FAILURE = "test.snapshot.export.failure"; -115static final String CONF_TEST_FAILURE_COUNT = "test.snapshot.export.failure.count"; -116int failuresCountToInject = 0; -117int injectedFailureCount = 0; -118 } -119 -120 // Command line options and defaults. -121 static final class Options { -122static final Option SNAPSHOT = new Option(null, "snapshot", true, "Snapshot to restore."); -123static final Option TARGET_NAME = new Option(null, "target", true, -124"Target name for the snapshot."); -125static final Option COPY_TO = new Option(null, "copy-to", true, "Remote " -126+ "destination hdfs://"); -127static final Option COPY_FROM = new Option(null, "copy-from", true, -128"Input folder hdfs:// (default hbase.rootdir)"); -129static final Option NO_CHECKSUM_VERIFY = new Option(null, "no-checksum-verify", false, -130"Do not verify checksum, use name+length only."); -131static final Option NO_TARGET_VERIFY = new Option(null, "no-target-verify", false, -132"Do not verify the integrity of the exported snapshot."); -133static final Option OVERWRITE = new Option(null, "overwrite", false, -134"Rewrite the snapshot manifest if already exists."); -135static final Option CHUSER = new Option(null, "chuser", true, -136"Change the owner of the files to the specified one."); -137static final Option CHGROUP = new Option(null, "chgroup", true, -138"Change the group of the files to the specified one."); -139static final Option CHMOD = new Option(null, "chmod", true, -140"Change the permission of the files to the specified one."); -141static final Option MAPPERS = new Option(null, "mappers", true, -142"Number of mappers to use during the copy (mapreduce.job.maps)."); -143static final Option BANDWIDTH = new Option(null, "bandwidth", true, -144"Limit bandwidth to this value in MB/second."); -145 } -146 -147 // Export Map-Reduce Counters, to keep track of the progress -148 public enum Counter { -149MISSING_FILES, FILES_COPIED, FILES_SKIPPED, COPY_FAILED, -150BYTES_EXPECTED, BYTES_SKIPPED, BYTES_COPIED -151 } -152 -153 private static class ExportMapper extends Mapper{ -155private static final Log LOG = LogFactory.getLog(ExportMapper.class); -156final static int REPORT_SIZE = 1 * 1024 * 1024; -157final static int BUFFER_SIZE = 64 * 1024; -158 -159private boolean verifyChecksum; -160private String filesGroup; -161private String filesUser; -162private short filesMode; -163private int bufferSize; -164 -165private FileSystem outputFs; -166private Path outputArchive; -167private Path outputRoot; -168 -169private FileSystem inputFs; -170private Path inputArchive; -171private Path 
inputRoot; -172 -173private static Testing testing = new Testing(); -174 -175@Override -176public void setup(Context context) throws IOException { -177 Configuration conf = context.getConfiguration(); -178 -179 Configuration srcConf = HBaseConfiguration.createClusterConf(conf, null, CONF_SOURCE_PREFIX); -180 Configuration destConf = HBaseConfiguration.createClusterConf(conf, null, CONF_DEST_PREFIX); -181 -182 verifyChecksum = conf.getBoolean(CONF_CHECKSUM_VERIFY, true); -183 -184 filesGroup = conf.get(CONF_FILES_GROUP); -185 filesUser = conf.get(CONF_FILES_USER); -186 filesMode = (short)conf.getInt(CONF_FILES_MODE, 0); -187 outputRoot = new Path(conf.get(CONF_OUTPUT_ROOT)); -188 inputRoot = new Path(conf.get(CONF_INPUT_ROOT)); -189 -190 inputArchive = new Path(inputRoot, HConstants.HFILE_ARCHIVE_DIRECTORY); -191 outputArchive = new Path(outputRoot, HConstants.HFILE_ARCHIVE_DIRECTORY); -192 -193 try
[41/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/35decbe4/devapidocs/org/apache/hadoop/hbase/snapshot/ExportSnapshot.ExportMapper.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/snapshot/ExportSnapshot.ExportMapper.html b/devapidocs/org/apache/hadoop/hbase/snapshot/ExportSnapshot.ExportMapper.html index 7de3cfb..70e5ede 100644 --- a/devapidocs/org/apache/hadoop/hbase/snapshot/ExportSnapshot.ExportMapper.html +++ b/devapidocs/org/apache/hadoop/hbase/snapshot/ExportSnapshot.ExportMapper.html @@ -118,7 +118,7 @@ var activeTableTab = "activeTableTab"; -private static class ExportSnapshot.ExportMapper +private static class ExportSnapshot.ExportMapper extends org.apache.hadoop.mapreduce.Mapper@@ -373,7 +373,7 @@ extends org.apache.hadoop.mapreduce.Mapper String filesGroup +private http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true"; title="class or interface in java.lang">String filesGroup @@ -426,7 +426,7 @@ extends org.apache.hadoop.mapreduce.Mapper String filesUser +private http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true"; title="class or interface in java.lang">String filesUser @@ -435,7 +435,7 @@ extends org.apache.hadoop.mapreduce.Mapper
[47/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/35decbe4/checkstyle.rss -- diff --git a/checkstyle.rss b/checkstyle.rss index d2e79f9..ab5f6d0 100644 --- a/checkstyle.rss +++ b/checkstyle.rss @@ -26,7 +26,7 @@ under the License. ©2007 - 2017 The Apache Software Foundation File: 3506, - Errors: 22097, + Errors: 22096, Warnings: 0, Infos: 0 @@ -881,7 +881,7 @@ under the License. 0 - 6 + 7 @@ -22707,7 +22707,7 @@ under the License. 0 - 4 + 3 @@ -44743,7 +44743,7 @@ under the License. 0 - 10 + 9 http://git-wip-us.apache.org/repos/asf/hbase-site/blob/35decbe4/coc.html -- diff --git a/coc.html b/coc.html index 97c970b..ee12ee2 100644 --- a/coc.html +++ b/coc.html @@ -7,7 +7,7 @@ - + Apache HBase – Code of Conduct Policy @@ -380,7 +380,7 @@ email to mailto:priv...@hbase.apache.org";>the priv https://www.apache.org/";>The Apache Software Foundation. All rights reserved. - Last Published: 2017-10-29 + Last Published: 2017-10-31 http://git-wip-us.apache.org/repos/asf/hbase-site/blob/35decbe4/cygwin.html -- diff --git a/cygwin.html b/cygwin.html index b34025a..79764ff 100644 --- a/cygwin.html +++ b/cygwin.html @@ -7,7 +7,7 @@ - + Apache HBase – Installing Apache HBase (TM) on Windows using Cygwin @@ -679,7 +679,7 @@ Now your HBase server is running, start coding and build that next https://www.apache.org/";>The Apache Software Foundation. All rights reserved. - Last Published: 2017-10-29 + Last Published: 2017-10-31 http://git-wip-us.apache.org/repos/asf/hbase-site/blob/35decbe4/dependencies.html -- diff --git a/dependencies.html b/dependencies.html index f27fff9..68e3b78 100644 --- a/dependencies.html +++ b/dependencies.html @@ -7,7 +7,7 @@ - + Apache HBase – Project Dependencies @@ -445,7 +445,7 @@ https://www.apache.org/";>The Apache Software Foundation. All rights reserved. - Last Published: 2017-10-29 + Last Published: 2017-10-31 http://git-wip-us.apache.org/repos/asf/hbase-site/blob/35decbe4/dependency-convergence.html -- diff --git a/dependency-convergence.html b/dependency-convergence.html index cb5956b..06f3cd0 100644 --- a/dependency-convergence.html +++ b/dependency-convergence.html @@ -7,7 +7,7 @@ - + Apache HBase – Reactor Dependency Convergence @@ -933,7 +933,7 @@ https://www.apache.org/";>The Apache Software Foundation. All rights reserved. - Last Published: 2017-10-29 + Last Published: 2017-10-31 http://git-wip-us.apache.org/repos/asf/hbase-site/blob/35decbe4/dependency-info.html -- diff --git a/dependency-info.html b/dependency-info.html index a0b27c8..152c0a0 100644 --- a/dependency-info.html +++ b/dependency-info.html @@ -7,7 +7,7 @@ - + Apache HBase – Dependency Information @@ -318,7 +318,7 @@ https://www.apache.org/";>The Apache Software Foundation. All rights reserved. - Last Published: 2017-10-29 + Last Published: 2017-10-31 http://git-wip-us.apache.org/repos/asf/hbase-site/blob/35decbe4/dependency-management.html -- diff --git a/dependency-management.html b/dependency-management.html index 4b64b38..987a9aa 100644 --- a/dependency-management.html +++ b/dependency-management.html @@ -7,7 +7,7 @@ - +
[44/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/35decbe4/devapidocs/org/apache/hadoop/hbase/master/package-tree.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/master/package-tree.html b/devapidocs/org/apache/hadoop/hbase/master/package-tree.html index 8c0860d..1ef7d97 100644 --- a/devapidocs/org/apache/hadoop/hbase/master/package-tree.html +++ b/devapidocs/org/apache/hadoop/hbase/master/package-tree.html @@ -324,11 +324,11 @@ java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true"; title="class or interface in java.lang">Enum(implements java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true"; title="class or interface in java.lang">Comparable , java.io.http://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true"; title="class or interface in java.io">Serializable) +org.apache.hadoop.hbase.master.RegionState.State org.apache.hadoop.hbase.master.SplitLogManager.ResubmitDirective +org.apache.hadoop.hbase.master.SplitLogManager.TerminationStatus org.apache.hadoop.hbase.master.MetricsMasterSourceFactoryImpl.FactoryStorage -org.apache.hadoop.hbase.master.RegionState.State org.apache.hadoop.hbase.master.MasterRpcServices.BalanceSwitchMode -org.apache.hadoop.hbase.master.SplitLogManager.TerminationStatus http://git-wip-us.apache.org/repos/asf/hbase-site/blob/35decbe4/devapidocs/org/apache/hadoop/hbase/package-tree.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/package-tree.html b/devapidocs/org/apache/hadoop/hbase/package-tree.html index cdd8f19..8002eef 100644 --- a/devapidocs/org/apache/hadoop/hbase/package-tree.html +++ b/devapidocs/org/apache/hadoop/hbase/package-tree.html @@ -432,19 +432,19 @@ java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true"; title="class or interface in java.lang">Enum (implements java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true"; title="class or interface in java.lang">Comparable , java.io.http://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true"; title="class or interface in java.io">Serializable) -org.apache.hadoop.hbase.HealthChecker.HealthCheckerExitStatus -org.apache.hadoop.hbase.ClusterStatus.Option -org.apache.hadoop.hbase.KeyValue.Type -org.apache.hadoop.hbase.CompatibilitySingletonFactory.SingletonStorage -org.apache.hadoop.hbase.KeepDeletedCells org.apache.hadoop.hbase.MemoryCompactionPolicy org.apache.hadoop.hbase.MetaTableAccessor.QueryType -org.apache.hadoop.hbase.CellBuilder.DataType -org.apache.hadoop.hbase.HConstants.OperationStatusCode +org.apache.hadoop.hbase.HealthChecker.HealthCheckerExitStatus org.apache.hadoop.hbase.ProcedureState org.apache.hadoop.hbase.CellBuilderType +org.apache.hadoop.hbase.CompatibilitySingletonFactory.SingletonStorage org.apache.hadoop.hbase.Coprocessor.State +org.apache.hadoop.hbase.HConstants.OperationStatusCode +org.apache.hadoop.hbase.ClusterStatus.Option org.apache.hadoop.hbase.CompareOperator +org.apache.hadoop.hbase.KeepDeletedCells +org.apache.hadoop.hbase.CellBuilder.DataType +org.apache.hadoop.hbase.KeyValue.Type http://git-wip-us.apache.org/repos/asf/hbase-site/blob/35decbe4/devapidocs/org/apache/hadoop/hbase/procedure2/package-tree.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/procedure2/package-tree.html b/devapidocs/org/apache/hadoop/hbase/procedure2/package-tree.html index 55f1fa6..e6417d5 100644 --- a/devapidocs/org/apache/hadoop/hbase/procedure2/package-tree.html +++ 
b/devapidocs/org/apache/hadoop/hbase/procedure2/package-tree.html @@ -203,11 +203,11 @@ java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true"; title="class or interface in java.lang">Enum (implements java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true"; title="class or interface in java.lang">Comparable , java.io.http://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true"; title="class or interface in java.io">Serializable) -org.apache.hadoop.hbase.procedure2.StateMachineProcedure.Flow org.apache.hadoop.hbase.procedure2.Procedure.LockState -org.apache.hadoop.hbase.procedure2.LockedResourceType -org.apache.hadoop.hbase.procedure2.RootProcedureState.State org.apache.hadoop.hbase.procedure2.LockType +org.apache.hadoop.hbase.procedure2.RootProcedureState.State +org.apache.hadoop.hbase.procedure2.LockedResourceType +org.apache.hadoop.hbase.procedure2.StateMachineProcedure.Flow http://git-wip-us.apache.org/repos/asf/hbase-site/blob/35decbe4/devapidocs/org/apache/hadoop/hbase/quotas/package-tree.html --
[39/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/35decbe4/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/FullTableBackupClient.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/FullTableBackupClient.html b/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/FullTableBackupClient.html index 687328c..e354b98 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/FullTableBackupClient.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/FullTableBackupClient.html @@ -30,206 +30,216 @@ 022import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.BACKUP_MAX_ATTEMPTS_KEY; 023import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.DEFAULT_BACKUP_ATTEMPTS_PAUSE_MS; 024import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.DEFAULT_BACKUP_MAX_ATTEMPTS; -025 -026import java.io.IOException; -027import java.util.HashMap; -028import java.util.List; -029import java.util.Map; -030 -031import org.apache.commons.logging.Log; -032import org.apache.commons.logging.LogFactory; -033import org.apache.hadoop.hbase.TableName; -034import org.apache.hadoop.hbase.backup.BackupCopyJob; -035import org.apache.hadoop.hbase.backup.BackupInfo; -036import org.apache.hadoop.hbase.backup.BackupInfo.BackupPhase; -037import org.apache.hadoop.hbase.backup.BackupInfo.BackupState; -038import org.apache.hadoop.hbase.backup.BackupRequest; -039import org.apache.hadoop.hbase.backup.BackupRestoreFactory; -040import org.apache.hadoop.hbase.backup.BackupType; -041import org.apache.hadoop.hbase.backup.master.LogRollMasterProcedureManager; -042import org.apache.hadoop.hbase.backup.util.BackupUtils; -043import org.apache.yetus.audience.InterfaceAudience; -044import org.apache.hadoop.hbase.client.Admin; -045import org.apache.hadoop.hbase.client.Connection; -046import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; -047 -048/** -049 * Full table backup implementation -050 * -051 */ -052@InterfaceAudience.Private -053public class FullTableBackupClient extends TableBackupClient { -054 private static final Log LOG = LogFactory.getLog(FullTableBackupClient.class); -055 -056 public FullTableBackupClient() { -057 } -058 -059 public FullTableBackupClient(final Connection conn, final String backupId, BackupRequest request) -060 throws IOException { -061super(conn, backupId, request); -062 } -063 -064 /** -065 * Do snapshot copy. -066 * @param backupInfo backup info -067 * @throws Exception exception -068 */ -069 protected void snapshotCopy(BackupInfo backupInfo) throws Exception { -070LOG.info("Snapshot copy is starting."); -071 -072// set overall backup phase: snapshot_copy -073 backupInfo.setPhase(BackupPhase.SNAPSHOTCOPY); -074 -075// call ExportSnapshot to copy files based on hbase snapshot for backup -076// ExportSnapshot only supports single snapshot export, so we need to loop for the multiple tables case -077BackupCopyJob copyService = BackupRestoreFactory.getBackupCopyJob(conf); -078 -079// number of snapshots matches number of tables -080float numOfSnapshots = backupInfo.getSnapshotNames().size(); -081 -082LOG.debug("There are " + (int) numOfSnapshots + " snapshots to be copied."); -083 -084for (TableName table : backupInfo.getTables()) { -085 // Currently we simply set the sub copy tasks by counting the table snapshot number, we can -086 // calculate the real files' size for the percentage in the future. 
-087 // backupCopier.setSubTaskPercntgInWholeTask(1f / numOfSnapshots); -088 int res = 0; -089 String[] args = new String[4]; -090 args[0] = "-snapshot"; -091 args[1] = backupInfo.getSnapshotName(table); -092 args[2] = "-copy-to"; -093 args[3] = backupInfo.getTableBackupDir(table); -094 -095 LOG.debug("Copy snapshot " + args[1] + " to " + args[3]); -096 res = copyService.copy(backupInfo, backupManager, conf, BackupType.FULL, args); -097 // if one snapshot export fails, do not continue for the remaining snapshots -098 if (res != 0) { -099LOG.error("Exporting Snapshot " + args[1] + " failed with return code: " + res + "."); -100 -101throw new IOException("Failed of exporting snapshot " + args[1] + " to " + args[3] -102+ " with reason code " + res); -103 } -104 LOG.info("Snapshot copy " + args[1] + " finished."); -105} -106 } -107 -108 /** -109 * Backup request execution -110 * @throws IOException -111 */ -112 @Override -113 public void execute() throws IOException { -114try (Admin admin = conn.getAdmin();) { -115 -116 // Begin BACKUP -117 beginBackup(backupManager, backupInfo); -118 String savedStartCode = null; -119 boolean firstBackup = false; -120 // do snapshot for full table backup -121 -122 savedStartCode = backupManager.readB
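snapshotCopy above runs one export job per table because, as its comment notes, ExportSnapshot exports a single snapshot at a time. A condensed sketch of that loop using the same -snapshot/-copy-to argument pairs, again under the assumption that the tool runs via ToolRunner; the map contents are placeholders:

import java.io.IOException;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

class SnapshotCopyLoopSketch {
  // One export job per (snapshot, destination dir) pair, aborting on the
  // first non-zero return code, mirroring the loop in snapshotCopy above.
  static void copyAll(Configuration conf, Map<String, String> snapshotToDir)
      throws Exception {
    for (Map.Entry<String, String> e : snapshotToDir.entrySet()) {
      String[] toolArgs = { "-snapshot", e.getKey(), "-copy-to", e.getValue() };
      int res = ToolRunner.run(conf, new ExportSnapshot(), toolArgs);
      if (res != 0) {
        throw new IOException("Export of snapshot " + e.getKey() + " to "
            + e.getValue() + " failed with return code " + res);
      }
    }
  }
}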
[46/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/35decbe4/devapidocs/org/apache/hadoop/hbase/backup/BackupRestoreConstants.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/backup/BackupRestoreConstants.html b/devapidocs/org/apache/hadoop/hbase/backup/BackupRestoreConstants.html index e8626d2..0f811fd 100644 --- a/devapidocs/org/apache/hadoop/hbase/backup/BackupRestoreConstants.html +++ b/devapidocs/org/apache/hadoop/hbase/backup/BackupRestoreConstants.html @@ -193,118 +193,122 @@ public interface static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true"; title="class or interface in java.lang">String -OPTION_BANDWIDTH +JOB_NAME_CONF_KEY static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true"; title="class or interface in java.lang">String -OPTION_BANDWIDTH_DESC +OPTION_BANDWIDTH static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true"; title="class or interface in java.lang">String -OPTION_CHECK +OPTION_BANDWIDTH_DESC static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true"; title="class or interface in java.lang">String -OPTION_CHECK_DESC +OPTION_CHECK static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true"; title="class or interface in java.lang">String -OPTION_DEBUG +OPTION_CHECK_DESC static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true"; title="class or interface in java.lang">String -OPTION_DEBUG_DESC +OPTION_DEBUG static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true"; title="class or interface in java.lang">String -OPTION_OVERWRITE +OPTION_DEBUG_DESC static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true"; title="class or interface in java.lang">String -OPTION_OVERWRITE_DESC +OPTION_OVERWRITE static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true"; title="class or interface in java.lang">String -OPTION_PATH +OPTION_OVERWRITE_DESC static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true"; title="class or interface in java.lang">String -OPTION_PATH_DESC +OPTION_PATH static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true"; title="class or interface in java.lang">String -OPTION_RECORD_NUMBER +OPTION_PATH_DESC static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true"; title="class or interface in java.lang">String -OPTION_RECORD_NUMBER_DESC +OPTION_RECORD_NUMBER static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true"; title="class or interface in java.lang">String -OPTION_SET +OPTION_RECORD_NUMBER_DESC static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true"; title="class or interface in java.lang">String -OPTION_SET_BACKUP_DESC +OPTION_SET static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true"; title="class or interface in java.lang">String -OPTION_SET_DESC +OPTION_SET_BACKUP_DESC static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true"; title="class or interface in java.lang">String -OPTION_SET_RESTORE_DESC +OPTION_SET_DESC static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true"; title="class or interface in java.lang">String -OPTION_TABLE +OPTION_SET_RESTORE_DESC static 
http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true"; title="class or interface in java.lang">String -OPTION_TABLE_DESC +OPTION_TABLE static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true"; title="class or interface in java.lang">String -OPTION_TABLE_LIST +OPTION_TABLE_DESC static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true"; title="class or interface in java.lang">String -OPTION_TABLE_LIST_DESC +OPTION_TABLE_LIST static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true"; title="class or interface in java.lang">String -OPTION_TABLE_MAPPING +OPTION_TABLE_LIST_DESC static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true"; title="class or interface in java.lang">String -OPTION_TABLE_MAPPING_DESC +OPTION_TABLE_MAPPING static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true"; title="class or interface in java.lang">String -OPTION_WORKERS +OPTION_TABLE_MAPPING_DESC static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true"; title="class or interface in java.lang">String -OPTION_WORKERS_DESC +OPTION_WORKERS static http://docs.ora
[23/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/35decbe4/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.html index 9761535..a4d2378 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.html @@ -1145,2347 +1145,2348 @@ 1137 if (LOG.isWarnEnabled()) { 1138LOG.warn("Large batch operation detected (greater than " + rowSizeWarnThreshold 1139+ ") (HBASE-18023)." + " Requested Number of Rows: " + sum + " Client: " -1140+ RpcServer.getRequestUserName() + "/" + RpcServer.getRemoteAddress() -1141+ " first region in multi=" + firstRegionName); -1142 } -1143} -1144 }; -1145 -1146 private final LogDelegate ld; -1147 -1148 public RSRpcServices(HRegionServer rs) throws IOException { -1149this(rs, DEFAULT_LOG_DELEGATE); -1150 } -1151 -1152 // Directly invoked only for testing -1153 RSRpcServices(HRegionServer rs, LogDelegate ld) throws IOException { -1154this.ld = ld; -1155regionServer = rs; -1156rowSizeWarnThreshold = rs.conf.getInt(BATCH_ROWS_THRESHOLD_NAME, BATCH_ROWS_THRESHOLD_DEFAULT); -1157RpcSchedulerFactory rpcSchedulerFactory; -1158try { -1159 Class> rpcSchedulerFactoryClass = rs.conf.getClass( -1160 REGION_SERVER_RPC_SCHEDULER_FACTORY_CLASS, -1161 SimpleRpcSchedulerFactory.class); -1162 rpcSchedulerFactory = ((RpcSchedulerFactory) rpcSchedulerFactoryClass.newInstance()); -1163} catch (InstantiationException e) { -1164 throw new IllegalArgumentException(e); -1165} catch (IllegalAccessException e) { -1166 throw new IllegalArgumentException(e); -1167} -1168// Server to handle client requests. -1169InetSocketAddress initialIsa; -1170InetSocketAddress bindAddress; -1171if(this instanceof MasterRpcServices) { -1172 String hostname = getHostname(rs.conf, true); -1173 int port = rs.conf.getInt(HConstants.MASTER_PORT, HConstants.DEFAULT_MASTER_PORT); -1174 // Creation of a HSA will force a resolve. -1175 initialIsa = new InetSocketAddress(hostname, port); -1176 bindAddress = new InetSocketAddress(rs.conf.get("hbase.master.ipc.address", hostname), port); -1177} else { -1178 String hostname = getHostname(rs.conf, false); -1179 int port = rs.conf.getInt(HConstants.REGIONSERVER_PORT, -1180 HConstants.DEFAULT_REGIONSERVER_PORT); -1181 // Creation of a HSA will force a resolve. -1182 initialIsa = new InetSocketAddress(hostname, port); -1183 bindAddress = new InetSocketAddress( -1184 rs.conf.get("hbase.regionserver.ipc.address", hostname), port); -1185} -1186if (initialIsa.getAddress() == null) { -1187 throw new IllegalArgumentException("Failed resolve of " + initialIsa); -1188} -1189priority = createPriority(); -1190String name = rs.getProcessName() + "/" + initialIsa.toString(); -1191// Set how many times to retry talking to another server over Connection. -1192 ConnectionUtils.setServerSideHConnectionRetriesConfig(rs.conf, name, LOG); -1193try { -1194 rpcServer = RpcServerFactory.createRpcServer(rs, name, getServices(), -1195 bindAddress, // use final bindAddress for this server. -1196 rs.conf, -1197 rpcSchedulerFactory.create(rs.conf, this, rs)); -1198 rpcServer.setRsRpcServices(this); -1199} catch (BindException be) { -1200 String configName = (this instanceof MasterRpcServices) ? HConstants.MASTER_PORT : -1201 HConstants.REGIONSERVER_PORT; -1202 throw new IOException(be.getMessage() + ". 
To switch ports use the '" + configName + -1203 "' configuration property.", be.getCause() != null ? be.getCause() : be); -1204} -1205 -1206scannerLeaseTimeoutPeriod = rs.conf.getInt( -1207 HConstants.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD, -1208 HConstants.DEFAULT_HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD); -1209maxScannerResultSize = rs.conf.getLong( -1210 HConstants.HBASE_SERVER_SCANNER_MAX_RESULT_SIZE_KEY, -1211 HConstants.DEFAULT_HBASE_SERVER_SCANNER_MAX_RESULT_SIZE); -1212rpcTimeout = rs.conf.getInt( -1213 HConstants.HBASE_RPC_TIMEOUT_KEY, -1214 HConstants.DEFAULT_HBASE_RPC_TIMEOUT); -1215minimumScanTimeLimitDelta = rs.conf.getLong( -1216 REGION_SERVER_RPC_MINIMUM_SCAN_TIME_LIMIT_DELTA, -1217 DEFAULT_REGION_SERVER_RPC_MINIMUM_SCAN_TIME_LIMIT_DELTA); -1218 -1219InetSocketAddress address = rpcServer.getListenerAddress(); -1220if (address == null) { -1221 throw new IOException("Listener channel is closed"); -1222
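The constructor in the hunk above loads its RpcSchedulerFactory reflectively, falling back to SimpleRpcSchedulerFactory when no override is configured. A minimal sketch of that loading idiom, kept generic on purpose: the conf key and default class are parameters here, and the hunk's two catch blocks are condensed into one multi-catch.

    import org.apache.hadoop.conf.Configuration;

    // Sketch of the reflective factory loading in the RSRpcServices constructor
    // above. Instantiation failures are rewrapped as IllegalArgumentException,
    // exactly as in the hunk; newInstance() needs a public no-arg constructor.
    final class SchedulerFactoryLoader {
      static Object createFactory(Configuration conf, String key, Class<?> defaultImpl) {
        Class<?> clazz = conf.getClass(key, defaultImpl);
        try {
          return clazz.newInstance();
        } catch (InstantiationException | IllegalAccessException e) {
          throw new IllegalArgumentException(e);
        }
      }
    }

Called with REGION_SERVER_RPC_SCHEDULER_FACTORY_CLASS and SimpleRpcSchedulerFactory.class, this reproduces the scheduler selection shown in the hunk.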
[43/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/35decbe4/devapidocs/org/apache/hadoop/hbase/regionserver/OffheapChunk.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/OffheapChunk.html b/devapidocs/org/apache/hadoop/hbase/regionserver/OffheapChunk.html
index 8ebdcb4..e7ead3c 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/OffheapChunk.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/OffheapChunk.html
@@ -184,7 +184,7 @@
 Methods inherited from class org.apache.hadoop.hbase.regionserver.Chunk
-alloc, getData, getId, getNextFreeOffset, init, isFromPool, reset, toString
+alloc, getData, getId, getNextFreeOffset, init, isFromPool, isJumbo, reset, toString

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/35decbe4/devapidocs/org/apache/hadoop/hbase/regionserver/OnheapChunk.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/OnheapChunk.html b/devapidocs/org/apache/hadoop/hbase/regionserver/OnheapChunk.html
index a759a85..d3b4483 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/OnheapChunk.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/OnheapChunk.html
@@ -184,7 +184,7 @@
 Methods inherited from class org.apache.hadoop.hbase.regionserver.Chunk
-alloc, getData, getId, getNextFreeOffset, init, isFromPool, reset, toString
+alloc, getData, getId, getNextFreeOffset, init, isFromPool, isJumbo, reset, toString

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/35decbe4/devapidocs/org/apache/hadoop/hbase/regionserver/RSRpcServices.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/RSRpcServices.html b/devapidocs/org/apache/hadoop/hbase/regionserver/RSRpcServices.html
index cfed4f0..93397e6 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/RSRpcServices.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/RSRpcServices.html
@@ -1197,7 +1197,7 @@
 ld
-private final RSRpcServices.LogDelegate ld
+private final RSRpcServices.LogDelegate ld
@@ -1207,7 +1207,7 @@
 SCANNER_ALREADY_CLOSED
 @Deprecated
-private static final IOException SCANNER_ALREADY_CLOSED
+private static final IOException SCANNER_ALREADY_CLOSED
 Deprecated.
@@ -1225,7 +1225,7 @@
 RSRpcServices
-public RSRpcServices(HRegionServer rs)
+public RSRpcServices(HRegionServer rs)
        throws IOException
 Throws:
@@ -1239,7 +1239,7 @@
 RSRpcServices
-RSRpcServices(HRegionServer rs,
+RSRpcServices(HRegionServer rs,
               RSRpcServices.LogDelegate ld)
        throws IOException
@@ -1563,7 +1563,7 @@
 onConfigurationChange
-public void onConfigurationChange(org.apache.hadoop.conf.Configuration newConf)
+public void onConfigurationChange(org.apache.hadoop.conf.Configuration newConf)
 Description copied from interface: ConfigurationObserver
 This method would be called by the ConfigurationManager object when the Configuration object is reloaded from disk.
@@ -1579,7 +1579,7 @@
 createPriority
-protected PriorityFunction createPriority()
+protected PriorityFunction createPriority()
@@ -1588,7 +1588,7 @@
 getHostname
-public static String getHostname(org.apache.hadoop.conf.Configuration conf,
+public static String getHostname(org.apache.hadoop.conf.Configuration conf,
                                  boolean isMaster)
                           throws UnknownHostException
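Both RSRpcServices constructors funnel through the address-selection logic visible in the src-html hunk earlier in this series: getHostname(conf, isMaster) plus a per-role port and ipc-address key. A condensed sketch of that selection; the ipc-address keys are quoted from the hunk, while the port keys and default ports stand in for HConstants values not spelled out there.

    import java.net.InetSocketAddress;
    import org.apache.hadoop.conf.Configuration;

    // Condensed from the constructor hunk: master and region server read
    // different port and ipc-address keys, and an unresolvable hostname is
    // rejected up front. Default ports are stand-ins for the HConstants values.
    final class RpcAddressSelection {
      static InetSocketAddress[] select(Configuration conf, boolean isMaster, String hostname) {
        String portKey = isMaster ? "hbase.master.port" : "hbase.regionserver.port";
        String ipcKey = isMaster ? "hbase.master.ipc.address" : "hbase.regionserver.ipc.address";
        int port = conf.getInt(portKey, isMaster ? 16000 : 16020);
        // Creating an InetSocketAddress forces a resolve, as the hunk's comment notes.
        InetSocketAddress initialIsa = new InetSocketAddress(hostname, port);
        InetSocketAddress bindAddress = new InetSocketAddress(conf.get(ipcKey, hostname), port);
        if (initialIsa.getAddress() == null) {
          throw new IllegalArgumentException("Failed resolve of " + initialIsa);
        }
        return new InetSocketAddress[] { initialIsa, bindAddress };
      }
    }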
[40/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/35decbe4/devapidocs/org/apache/hadoop/hbase/snapshot/ExportSnapshot.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/snapshot/ExportSnapshot.html b/devapidocs/org/apache/hadoop/hbase/snapshot/ExportSnapshot.html index 2939c25..a1a3b17 100644 --- a/devapidocs/org/apache/hadoop/hbase/snapshot/ExportSnapshot.html +++ b/devapidocs/org/apache/hadoop/hbase/snapshot/ExportSnapshot.html @@ -228,83 +228,87 @@ implements org.apache.hadoop.util.Tool private static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true"; title="class or interface in java.lang">String -CONF_NUM_SPLITS +CONF_MR_JOB_NAME private static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true"; title="class or interface in java.lang">String -CONF_OUTPUT_ROOT +CONF_NUM_SPLITS +private static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true"; title="class or interface in java.lang">String +CONF_OUTPUT_ROOT + + protected static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true"; title="class or interface in java.lang">String CONF_SKIP_TMP - + private static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true"; title="class or interface in java.lang">String CONF_SNAPSHOT_DIR - + private static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true"; title="class or interface in java.lang">String CONF_SNAPSHOT_NAME - + static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true"; title="class or interface in java.lang">String CONF_SOURCE_PREFIX Configuration prefix for overrides for the source filesystem - + private http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true"; title="class or interface in java.lang">String filesGroup - + private int filesMode - + private http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true"; title="class or interface in java.lang">String filesUser - + private org.apache.hadoop.fs.Path inputRoot - + private static org.apache.commons.logging.Log LOG - + private int mappers - + private static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true"; title="class or interface in java.lang">String MR_NUM_MAPS - + static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true"; title="class or interface in java.lang">String NAME - + private org.apache.hadoop.fs.Path outputRoot - + private boolean overwrite - + private http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true"; title="class or interface in java.lang">String snapshotName - + private http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true"; title="class or interface in java.lang">String targetName - + private boolean verifyChecksum - + private boolean verifyTarget @@ -692,13 +696,26 @@ implements org.apache.hadoop.util.Tool + + + + + +CONF_MR_JOB_NAME +private static final http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true"; title="class or interface in java.lang">String CONF_MR_JOB_NAME + +See Also: +Constant Field Values + + + CONF_SKIP_TMP -protected static final http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true"; title="class or interface in java.lang">String CONF_SKIP_TMP +protected static final http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true"; title="class 
or interface in java.lang">String CONF_SKIP_TMP See Also: Constant Field Values @@ -711,7 +728,7 @@ implements org.apache.hadoop.util.Tool verifyTarget -private boolean verifyTarget +private boolean verifyTarget @@ -720,7 +737,7 @@ implements org.apache.hadoop.util.Tool verifyChecksum -private boolean verifyChecksum +private boolean verifyChecksum @@ -729,7 +746,7 @@ implements org.apache.hadoop.util.Tool snapshotName -private http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true"; title="class or interface in java.lang">String snapshotName +private http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true"; title="class or interface in java.lang">String snapshotName @@ -738,7 +755,7 @@ implements org.apache.hadoop.util.Tool targetName -private http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true"; title="class or interface in java.lang">String targetName +private http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true"; title="class or interface in java.lang">String targetName @@ -747,7 +764,7 @@ implements org.apache.hadoop.util.Tool overwrite -private
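The new CONF_MR_JOB_NAME field suggests the export's MapReduce job name became configurable. Its string value sits behind the Constant Field Values link, so the key below is a hypothetical stand-in; the sketch only illustrates the read-with-fallback pattern such a field typically backs.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapreduce.Job;

    final class JobNaming {
      // Hypothetical value: the diff shows only the field name CONF_MR_JOB_NAME.
      static final String CONF_MR_JOB_NAME = "snapshot.export.mapreduce.job.name";

      static Job newExportJob(Configuration conf, String snapshotName) throws IOException {
        Job job = Job.getInstance(conf);
        // fall back to a generated name unless the user configured an override
        job.setJobName(conf.get(CONF_MR_JOB_NAME, "ExportSnapshot-" + snapshotName));
        return job;
      }
    }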
[06/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/35decbe4/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.html -- diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.html index 312f6e4..6a81b0e 100644 --- a/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.html +++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.html @@ -40,801 +40,746 @@ 032import org.apache.hadoop.hbase.testclassification.MediumTests; 033import org.apache.hadoop.hbase.testclassification.RegionServerTests; 034import org.apache.hadoop.hbase.util.Bytes; -035import org.apache.hadoop.hbase.util.ClassSize; -036import org.apache.hadoop.hbase.util.EnvironmentEdge; -037import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; -038import org.apache.hadoop.hbase.util.Threads; -039import org.apache.hadoop.hbase.wal.WAL; -040import org.junit.After; -041import org.junit.Before; -042import org.junit.Test; -043import org.junit.experimental.categories.Category; -044 -045import static org.junit.Assert.assertEquals; -046import static org.junit.Assert.assertTrue; -047 -048/** -049 * compacted memstore test case -050 */ -051@Category({RegionServerTests.class, MediumTests.class}) -052public class TestCompactingMemStore extends TestDefaultMemStore { -053 -054 private static final Log LOG = LogFactory.getLog(TestCompactingMemStore.class); -055 protected static ChunkCreator chunkCreator; -056 protected HRegion region; -057 protected RegionServicesForStores regionServicesForStores; -058 protected HStore store; -059 -060 // -061 // Helpers -062 // -063 protected static byte[] makeQualifier(final int i1, final int i2) { -064return Bytes.toBytes(Integer.toString(i1) + ";" + -065Integer.toString(i2)); -066 } -067 -068 @After -069 public void tearDown() throws Exception { -070chunkCreator.clearChunksInPool(); -071 } -072 -073 @Override -074 @Before -075 public void setUp() throws Exception { -076compactingSetUp(); -077this.memstore = new CompactingMemStore(HBaseConfiguration.create(), CellComparatorImpl.COMPARATOR, -078store, regionServicesForStores, MemoryCompactionPolicy.EAGER); -079 } -080 -081 protected void compactingSetUp() throws Exception { -082super.internalSetUp(); -083Configuration conf = new Configuration(); -084 conf.setBoolean(MemStoreLAB.USEMSLAB_KEY, true); -085 conf.setFloat(MemStoreLAB.CHUNK_POOL_MAXSIZE_KEY, 0.2f); -086 conf.setInt(HRegion.MEMSTORE_PERIODIC_FLUSH_INTERVAL, 1000); -087HBaseTestingUtility hbaseUtility = HBaseTestingUtility.createLocalHTU(conf); -088HColumnDescriptor hcd = new HColumnDescriptor(FAMILY); -089HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("foobar")); -090htd.addFamily(hcd); -091HRegionInfo info = -092new HRegionInfo(TableName.valueOf("foobar"), null, null, false); -093WAL wal = hbaseUtility.createWal(conf, hbaseUtility.getDataTestDir(), info); -094this.region = HRegion.createHRegion(info, hbaseUtility.getDataTestDir(), conf, htd, wal, true); -095//this.region = hbaseUtility.createTestRegion("foobar", hcd); -096this.regionServicesForStores = region.getRegionServicesForStores(); -097this.store = new HStore(region, hcd, conf); -098 -099long globalMemStoreLimit = (long) (ManagementFactory.getMemoryMXBean().getHeapMemoryUsage() -100.getMax() * MemorySizeUtil.getGlobalMemStoreHeapPercent(conf, false)); -101chunkCreator = 
ChunkCreator.initialize(MemStoreLABImpl.CHUNK_SIZE_DEFAULT, false, -102globalMemStoreLimit, 0.4f, MemStoreLAB.POOL_INITIAL_SIZE_DEFAULT, null); -103assertTrue(chunkCreator != null); -104 } -105 -106 /** -107 * A simple test which verifies the 3 possible states when scanning across snapshot. -108 * -109 * @throws IOException -110 * @throws CloneNotSupportedException -111 */ -112 @Override -113 @Test -114 public void testScanAcrossSnapshot2() throws IOException, CloneNotSupportedException { -115// we are going to the scanning across snapshot with two kvs -116// kv1 should always be returned before kv2 -117final byte[] one = Bytes.toBytes(1); -118final byte[] two = Bytes.toBytes(2); -119final byte[] f = Bytes.toBytes("f"); -120final byte[] q = Bytes.toBytes("q"); -121final byte[] v = Bytes.toBytes(3); -122 -123final KeyValue kv1 = new KeyValue(one, f, q, 10, v); -124final KeyValue kv2 = new KeyValue(two, f, q, 10, v); -125 -126// use case 1: both kvs in kvset -127this.memstore.
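The compactingSetUp() shown above boots the singleton ChunkCreator once per test, sizing the pool off the JVM heap. Just that step, trimmed out of the test; parameter values are the ones in the hunk, and the import paths for the HBase classes are assumed.

    import java.lang.management.ManagementFactory;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.io.util.MemorySizeUtil;
    import org.apache.hadoop.hbase.regionserver.ChunkCreator;
    import org.apache.hadoop.hbase.regionserver.MemStoreLAB;
    import org.apache.hadoop.hbase.regionserver.MemStoreLABImpl;

    // Trimmed from compactingSetUp() above: enable MSLAB, derive the global
    // memstore limit from the heap, then initialize the ChunkCreator singleton.
    final class ChunkCreatorTestSetup {
      static ChunkCreator initForTest(Configuration conf) {
        conf.setBoolean(MemStoreLAB.USEMSLAB_KEY, true);
        conf.setFloat(MemStoreLAB.CHUNK_POOL_MAXSIZE_KEY, 0.2f);
        long globalMemStoreLimit = (long) (ManagementFactory.getMemoryMXBean()
            .getHeapMemoryUsage().getMax()
            * MemorySizeUtil.getGlobalMemStoreHeapPercent(conf, false));
        return ChunkCreator.initialize(MemStoreLABImpl.CHUNK_SIZE_DEFAULT, false,
            globalMemStoreLimit, 0.4f, MemStoreLAB.POOL_INITIAL_SIZE_DEFAULT, null);
      }
    }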
[32/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/35decbe4/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/ChunkCreator.MemStoreChunkPool.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/ChunkCreator.MemStoreChunkPool.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/ChunkCreator.MemStoreChunkPool.html index faa5693..157ecf3 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/ChunkCreator.MemStoreChunkPool.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/ChunkCreator.MemStoreChunkPool.html @@ -118,338 +118,375 @@ 110 * @return the chunk that was initialized 111 */ 112 Chunk getChunk() { -113return getChunk(CompactingMemStore.IndexType.ARRAY_MAP); +113return getChunk(CompactingMemStore.IndexType.ARRAY_MAP, chunkSize); 114 } 115 116 /** -117 * Creates and inits a chunk. +117 * Creates and inits a chunk. The default implementation for specific index type. 118 * @return the chunk that was initialized -119 * @param chunkIndexType whether the requested chunk is going to be used with CellChunkMap index -120 */ -121 Chunk getChunk(CompactingMemStore.IndexType chunkIndexType) { -122Chunk chunk = null; -123if (pool != null) { -124 // the pool creates the chunk internally. The chunk#init() call happens here -125 chunk = this.pool.getChunk(); -126 // the pool has run out of maxCount -127 if (chunk == null) { -128if (LOG.isTraceEnabled()) { -129 LOG.trace("The chunk pool is full. Reached maxCount= " + this.pool.getMaxCount() -130 + ". Creating chunk onheap."); -131} -132 } -133} -134if (chunk == null) { -135 // the second boolean parameter means: -136 // if CellChunkMap index is requested, put allocated on demand chunk mapping into chunkIdMap -137 chunk = createChunk(false, chunkIndexType); -138} -139 -140// now we need to actually do the expensive memory allocation step in case of a new chunk, -141// else only the offset is set to the beginning of the chunk to accept allocations -142chunk.init(); -143return chunk; -144 } -145 -146 /** -147 * Creates the chunk either onheap or offheap -148 * @param pool indicates if the chunks have to be created which will be used by the Pool -149 * @param chunkIndexType -150 * @return the chunk -151 */ -152 private Chunk createChunk(boolean pool, CompactingMemStore.IndexType chunkIndexType) { -153Chunk chunk = null; -154int id = chunkID.getAndIncrement(); -155assert id > 0; -156// do not create offheap chunk on demand -157if (pool && this.offheap) { -158 chunk = new OffheapChunk(chunkSize, id, pool); -159} else { -160 chunk = new OnheapChunk(chunkSize, id, pool); -161} -162if (pool || (chunkIndexType == CompactingMemStore.IndexType.CHUNK_MAP)) { -163 // put the pool chunk into the chunkIdMap so it is not GC-ed -164 this.chunkIdMap.put(chunk.getId(), chunk); -165} -166return chunk; -167 } -168 -169 @VisibleForTesting -170 // Used to translate the ChunkID into a chunk ref -171 Chunk getChunk(int id) { -172// can return null if chunk was never mapped -173return chunkIdMap.get(id); -174 } -175 -176 int getChunkSize() { -177return this.chunkSize; -178 } -179 -180 boolean isOffheap() { -181return this.offheap; -182 } -183 -184 private void removeChunks(SetchunkIDs) { -185 this.chunkIdMap.keySet().removeAll(chunkIDs); -186 } -187 -188 Chunk removeChunk(int chunkId) { -189return this.chunkIdMap.remove(chunkId); -190 } -191 -192 @VisibleForTesting -193 // the chunks in the chunkIdMap may already be released so we shouldn't relay -194 // on this counting for strong correctness. 
This method is used only in testing. -195 int numberOfMappedChunks() { -196return this.chunkIdMap.size(); -197 } -198 -199 @VisibleForTesting -200 void clearChunkIds() { -201this.chunkIdMap.clear(); -202 } -203 -204 /** -205 * A pool of {@link Chunk} instances. -206 * -207 * MemStoreChunkPool caches a number of retired chunks for reusing, it could -208 * decrease allocating bytes when writing, thereby optimizing the garbage -209 * collection on JVM. -210 */ -211 private class MemStoreChunkPool implements HeapMemoryTuneObserver { -212private int maxCount; -213 -214// A queue of reclaimed chunks -215private final BlockingQueue reclaimedChunks; -216private final float poolSizePercentage; -217 -218/** Statistics thread schedule pool */ -219private final ScheduledExecutorService scheduleThreadPool; -220/** Statistics thread */ -221private static final int statThreadPeriod = 60 * 5; -222private final AtomicLong chunkCount = new AtomicLong(); -223private final LongAdder reusedChunkCou
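The getChunk() rewrite above keeps the same control flow while threading the chunk size through: poll the pool, fall back to on-demand creation, then call init() once. A standalone sketch of that flow with the HBase types reduced to placeholders:

    import java.util.concurrent.BlockingQueue;
    import java.util.concurrent.LinkedBlockingQueue;

    // Pool-first acquisition as in ChunkCreator.getChunk() above. "Chunk" is a
    // placeholder class, not the HBase one; what matters is the order: pool
    // poll, on-demand fallback, then a single init() that only pays the
    // expensive allocation for brand-new chunks.
    final class PoolFirstAllocator {
      static final class Chunk {
        private boolean initialized;
        void init() { initialized = true; } // expensive allocation happens here
      }

      private final BlockingQueue<Chunk> reclaimedChunks = new LinkedBlockingQueue<>();

      Chunk getChunk() {
        Chunk chunk = reclaimedChunks.poll(); // null when the pool has run out
        if (chunk == null) {
          chunk = new Chunk(); // on-demand creation, as in createChunk(false, ...)
        }
        chunk.init();
        return chunk;
      }
    }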
[07/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/35decbe4/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.EnvironmentEdgeForMemstoreTest.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.EnvironmentEdgeForMemstoreTest.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.EnvironmentEdgeForMemstoreTest.html
index 312f6e4..6a81b0e 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.EnvironmentEdgeForMemstoreTest.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.EnvironmentEdgeForMemstoreTest.html
@@ -40,801 +40,746 @@
[45/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/35decbe4/devapidocs/org/apache/hadoop/hbase/master/HMaster.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/master/HMaster.html b/devapidocs/org/apache/hadoop/hbase/master/HMaster.html
index c50ef11..0c18b0d 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/HMaster.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/HMaster.html
@@ -2686,7 +2686,7 @@
 setCatalogJanitorEnabled
-public void setCatalogJanitorEnabled(boolean b)
+public void setCatalogJanitorEnabled(boolean b)
 Switch for the background CatalogJanitor thread. Used for testing. The thread will continue to run. It will just be a noop if disabled.
@@ -2702,7 +2702,7 @@
 mergeRegions
-public long mergeRegions(RegionInfo[] regionsToMerge,
+public long mergeRegions(RegionInfo[] regionsToMerge,
                          boolean forcible,
                          long nonceGroup,
                          long nonce)
@@ -2730,7 +2730,7 @@
 splitRegion
-public long splitRegion(RegionInfo regionInfo,
+public long splitRegion(RegionInfo regionInfo,
                         byte[] splitRow,
                         long nonceGroup,
                         long nonce)
@@ -2758,7 +2758,7 @@
 move
-public void move(byte[] encodedRegionName,
+public void move(byte[] encodedRegionName,
                  byte[] destServerName)
           throws HBaseIOException
@@ -2773,7 +2773,7 @@
 createTable
-public long createTable(TableDescriptor tableDescriptor,
+public long createTable(TableDescriptor tableDescriptor,
                         byte[][] splitKeys,
                         long nonceGroup,
                         long nonce)
@@ -2798,7 +2798,7 @@
 createSystemTable
-public long createSystemTable(TableDescriptor tableDescriptor)
+public long createSystemTable(TableDescriptor tableDescriptor)
                        throws IOException
 Description copied from interface: MasterServices
 Create a system table using the given table definition.
@@ -2819,7 +2819,7 @@
 sanityCheckTableDescriptor
-private void sanityCheckTableDescriptor(TableDescriptor htd)
+private void sanityCheckTableDescriptor(TableDescriptor htd)
                                  throws IOException
 Checks whether the table conforms to some sane limits, and configured values (compression, etc) work. Throws an exception if something is wrong.
@@ -2835,7 +2835,7 @@
 checkReplicationScope
-private void checkReplicationScope(ColumnFamilyDescriptor hcd)
+private void checkReplicationScope(ColumnFamilyDescriptor hcd)
                             throws IOException
 Throws:
@@ -2849,7 +2849,7 @@
 checkCompactionPolicy
-private void checkCompactionPolicy(org.apache.hadoop.conf.Configuration conf,
+private void checkCompactionPolicy(org.apache.hadoop.conf.Configuration conf,
                                    TableDescriptor htd)
                             throws IOException
@@ -2864,7 +2864,7 @@
 warnOrThrowExceptionForFailure
-private static void warnOrThrowExceptionForFailure(boolean logWarn,
+private static void warnOrThrowExceptionForFailure(boolean logWarn,
                                                    String confKey,
                                                    String message,
                                                    Exception cause)
@@ -2881,7 +2881,7 @@
 startActiveMasterManager
-private void startActiveMasterManager(int infoPort)
+private void startActiveMasterManager(int infoPort)
                                throws org.apache.zookeeper.KeeperException
 Throws:
@@ -2895,7 +2895,7 @@
 checkCompression
-private void checkCompression(TableDescriptor htd)
+private void checkCompression(TableDescriptor htd)
                        throws IOException
 Throws:
@@ -2909,7 +
[12/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/35decbe4/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/dependency-info.html -- diff --git a/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/dependency-info.html b/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/dependency-info.html index c27d983..7a9fca5 100644 --- a/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/dependency-info.html +++ b/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/dependency-info.html @@ -7,7 +7,7 @@ - + Apache HBase - Exemplar for hbase-shaded-client archetype – Dependency Information @@ -147,7 +147,7 @@ https://www.apache.org/";>The Apache Software Foundation. All rights reserved. - Last Published: 2017-10-29 + Last Published: 2017-10-31 http://git-wip-us.apache.org/repos/asf/hbase-site/blob/35decbe4/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/dependency-management.html -- diff --git a/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/dependency-management.html b/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/dependency-management.html index 7c7a87a..590764a 100644 --- a/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/dependency-management.html +++ b/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/dependency-management.html @@ -7,7 +7,7 @@ - + Apache HBase - Exemplar for hbase-shaded-client archetype – Project Dependency Management @@ -784,7 +784,7 @@ https://www.apache.org/";>The Apache Software Foundation. All rights reserved. - Last Published: 2017-10-29 + Last Published: 2017-10-31 http://git-wip-us.apache.org/repos/asf/hbase-site/blob/35decbe4/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/index.html -- diff --git a/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/index.html b/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/index.html index cf3eaef..7be121f 100644 --- a/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/index.html +++ b/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/index.html @@ -7,7 +7,7 @@ - + Apache HBase - Exemplar for hbase-shaded-client archetype – About @@ -119,7 +119,7 @@ https://www.apache.org/";>The Apache Software Foundation. All rights reserved. - Last Published: 2017-10-29 + Last Published: 2017-10-31 http://git-wip-us.apache.org/repos/asf/hbase-site/blob/35decbe4/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/integration.html -- diff --git a/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/integration.html b/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/integration.html index a3ecf3a..c1dc8ab 100644 --- a/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/integration.html +++ b/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/integration.html @@ -7,7 +7,7 @@ - + Apache HBase - Exemplar for hbase-shaded-client archetype – CI Management @@ -126,7 +126,7 @@ https://www.apache.org/";>The Apache Software Foundation. All rights reserved. 
- Last Published: 2017-10-29 + Last Published: 2017-10-31 http://git-wip-us.apache.org/repos/asf/hbase-site/blob/35decbe4/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/issue-tracking.html -- diff --git a/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/issue-tracking.html b/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/issue-tracking.html index cf0db1e..15da6dc 100644 --- a/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/issue-tracking.html +++ b/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/issue-tracking.html @@ -7,7 +7,7 @@
[22/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/35decbe4/devapidocs/src-html/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.html b/devapidocs/src-html/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.html index bfa6836..3bf1fcf 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.html @@ -37,416 +37,403 @@ 029import java.util.Map; 030import java.util.Set; 031import java.util.TreeMap; -032 -033import org.apache.commons.logging.Log; -034import org.apache.commons.logging.LogFactory; -035import org.apache.hadoop.conf.Configuration; -036import org.apache.hadoop.hbase.ClusterStatus; -037import org.apache.hadoop.hbase.HBaseIOException; -038import org.apache.hadoop.hbase.HConstants; -039import org.apache.hadoop.hbase.ServerName; -040import org.apache.hadoop.hbase.TableName; -041import org.apache.hadoop.hbase.client.RegionInfo; -042import org.apache.hadoop.hbase.constraint.ConstraintException; -043import org.apache.hadoop.hbase.master.LoadBalancer; -044import org.apache.hadoop.hbase.master.MasterServices; -045import org.apache.hadoop.hbase.master.RegionPlan; -046import org.apache.hadoop.hbase.master.balancer.StochasticLoadBalancer; -047import org.apache.hadoop.hbase.net.Address; -048import org.apache.hadoop.util.ReflectionUtils; -049import org.apache.yetus.audience.InterfaceAudience; -050 -051import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting; -052import org.apache.hadoop.hbase.shaded.com.google.common.collect.ArrayListMultimap; -053import org.apache.hadoop.hbase.shaded.com.google.common.collect.LinkedListMultimap; -054import org.apache.hadoop.hbase.shaded.com.google.common.collect.ListMultimap; -055import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists; -056import org.apache.hadoop.hbase.shaded.com.google.common.collect.Maps; -057 -058/** -059 * GroupBasedLoadBalancer, used when Region Server Grouping is configured (HBase-6721) -060 * It does region balance based on a table's group membership. -061 * -062 * Most assignment methods contain two exclusive code paths: Online - when the group -063 * table is online and Offline - when it is unavailable. -064 * -065 * During Offline, assignments are assigned based on cached information in zookeeper. -066 * If unavailable (ie bootstrap) then regions are assigned randomly. -067 * -068 * Once the GROUP table has been assigned, the balancer switches to Online and will then -069 * start providing appropriate assignments for user tables. -070 * -071 */ -072@InterfaceAudience.Private -073public class RSGroupBasedLoadBalancer implements RSGroupableBalancer { -074 private static final Log LOG = LogFactory.getLog(RSGroupBasedLoadBalancer.class); -075 -076 private Configuration config; -077 private ClusterStatus clusterStatus; -078 private MasterServices masterServices; -079 private volatile RSGroupInfoManager rsGroupInfoManager; -080 private LoadBalancer internalBalancer; -081 -082 /** -083 * Used by reflection in {@link org.apache.hadoop.hbase.master.balancer.LoadBalancerFactory}. 
-084 */ -085 @InterfaceAudience.Private -086 public RSGroupBasedLoadBalancer() {} -087 -088 @Override -089 public Configuration getConf() { -090return config; -091 } -092 -093 @Override -094 public void setConf(Configuration conf) { -095this.config = conf; -096 } -097 -098 @Override -099 public void setClusterStatus(ClusterStatus st) { -100this.clusterStatus = st; -101 } -102 -103 @Override -104 public void setMasterServices(MasterServices masterServices) { -105this.masterServices = masterServices; -106 } -107 -108 @Override -109 public ListbalanceCluster(TableName tableName, Map > -110 clusterState) throws HBaseIOException { -111return balanceCluster(clusterState); -112 } -113 -114 @Override -115 public List balanceCluster(Map > clusterState) -116 throws HBaseIOException { -117if (!isOnline()) { -118 throw new ConstraintException(RSGroupInfoManager.RSGROUP_TABLE_NAME + -119 " is not online, unable to perform balance"); -120} -121 -122 Map > correctedState = correctAssignments(clusterState); -123List regionPlans = new ArrayList<>(); -124 -125List misplacedRegions = correctedState.get(LoadBalancer.BOGUS_SERVER_NAME); -126for (RegionInfo regionInfo : misplacedRegions) { -127 ServerName serverName = findServerForRegion(clusterState, regionInfo); -128 regionPlans.add(new RegionPlan(regionInfo, serverName, null)); -129} -130try { -131 List
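balanceCluster() above refuses to run while the RSGroup table is offline, then plans moves for every region parked on LoadBalancer.BOGUS_SERVER_NAME before handing the corrected state to the internal balancer. The misplaced-region pre-pass, reduced to plain Java: ServerName, RegionInfo and RegionPlan are shrunk to strings, and findServerForRegion is a stand-in for the private helper.

    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.List;
    import java.util.Map;

    final class MisplacedRegionPrePass {
      // Placeholder for org.apache.hadoop.hbase.master.RegionPlan(region, source, dest).
      static final class RegionPlan {
        final String region, source, dest;
        RegionPlan(String region, String source, String dest) {
          this.region = region;
          this.source = source;
          this.dest = dest;
        }
      }

      // Regions on the bogus placeholder server are misplaced; emit a plan with a
      // null destination so the assignment manager re-assigns them, as in the hunk.
      static List<RegionPlan> evictMisplaced(Map<String, List<String>> clusterState,
          String bogusServer) {
        List<RegionPlan> plans = new ArrayList<>();
        for (String region : clusterState.getOrDefault(bogusServer,
            Collections.<String>emptyList())) {
          plans.add(new RegionPlan(region, findServerForRegion(clusterState, region), null));
        }
        return plans;
      }

      // Stand-in for the private findServerForRegion() helper used in the hunk.
      private static String findServerForRegion(Map<String, List<String>> state,
          String region) {
        for (Map.Entry<String, List<String>> e : state.entrySet()) {
          if (e.getValue().contains(region)) {
            return e.getKey();
          }
        }
        return null;
      }
    }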
[34/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/35decbe4/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html index b4165bf..dec7409 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html @@ -1560,1985 +1560,1986 @@ 1552 * @return Client info for use as prefix on an audit log string; who did an action 1553 */ 1554 public String getClientIdAuditPrefix() { -1555return "Client=" + RpcServer.getRequestUserName() + "/" + RpcServer.getRemoteAddress(); -1556 } -1557 -1558 /** -1559 * Switch for the background CatalogJanitor thread. -1560 * Used for testing. The thread will continue to run. It will just be a noop -1561 * if disabled. -1562 * @param b If false, the catalog janitor won't do anything. -1563 */ -1564 public void setCatalogJanitorEnabled(final boolean b) { -1565 this.catalogJanitorChore.setEnabled(b); -1566 } -1567 -1568 @Override -1569 public long mergeRegions( -1570 final RegionInfo[] regionsToMerge, -1571 final boolean forcible, -1572 final long nonceGroup, -1573 final long nonce) throws IOException { -1574checkInitialized(); -1575 -1576assert(regionsToMerge.length == 2); -1577 -1578TableName tableName = regionsToMerge[0].getTable(); -1579if (tableName == null || regionsToMerge[1].getTable() == null) { -1580 throw new UnknownRegionException ("Can't merge regions without table associated"); -1581} -1582 -1583if (!tableName.equals(regionsToMerge[1].getTable())) { -1584 throw new IOException ( -1585"Cannot merge regions from two different tables " + regionsToMerge[0].getTable() -1586+ " and " + regionsToMerge[1].getTable()); -1587} -1588 -1589if (RegionInfo.COMPARATOR.compare(regionsToMerge[0], regionsToMerge[1]) == 0) { -1590 throw new MergeRegionException( -1591"Cannot merge a region to itself " + regionsToMerge[0] + ", " + regionsToMerge[1]); -1592} -1593 -1594return MasterProcedureUtil.submitProcedure( -1595new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) { -1596 @Override -1597 protected void run() throws IOException { -1598 getMaster().getMasterCoprocessorHost().preMergeRegions(regionsToMerge); -1599 -1600 LOG.info(getClientIdAuditPrefix() + " Merge regions " + -1601 regionsToMerge[0].getEncodedName() + " and " + regionsToMerge[1].getEncodedName()); -1602 -1603submitProcedure(new MergeTableRegionsProcedure(procedureExecutor.getEnvironment(), -1604 regionsToMerge, forcible)); -1605 -1606 getMaster().getMasterCoprocessorHost().postMergeRegions(regionsToMerge); -1607 } -1608 -1609 @Override -1610 protected String getDescription() { -1611return "MergeTableProcedure"; -1612 } -1613}); -1614 } -1615 -1616 @Override -1617 public long splitRegion(final RegionInfo regionInfo, final byte[] splitRow, -1618 final long nonceGroup, final long nonce) -1619 throws IOException { -1620checkInitialized(); -1621return MasterProcedureUtil.submitProcedure( -1622new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) { -1623 @Override -1624 protected void run() throws IOException { -1625 getMaster().getMasterCoprocessorHost().preSplitRegion(regionInfo.getTable(), splitRow); -1626 LOG.info(getClientIdAuditPrefix() + " split " + regionInfo.getRegionNameAsString()); -1627 -1628// Execute the operation asynchronously -1629 submitProcedure(getAssignmentManager().createSplitProcedure(regionInfo, splitRow)); -1630 } 
-1631 -1632 @Override -1633 protected String getDescription() { -1634return "SplitTableProcedure"; -1635 } -1636}); -1637 } -1638 -1639 // Public so can be accessed by tests. Blocks until move is done. -1640 // Replace with an async implementation from which you can get -1641 // a success/failure result. -1642 @VisibleForTesting -1643 public void move(final byte[] encodedRegionName, byte[] destServerName) throws HBaseIOException { -1644RegionState regionState = assignmentManager.getRegionStates(). -1645 getRegionState(Bytes.toString(encodedRegionName)); -1646 -1647RegionInfo hri; -1648if (regionState != null) { -1649 hri = regionState.getRegion(); -1650} else { -1651 throw new UnknownRegionException(Bytes.toStringBinary(encodedRegionName)); -1652} -1653 -1654ServerName dest; -1655Listexclude = hri.getTable().isSystemTable() ? assignmentManager.getExcludedServersForSystemTable() -1656: new ArrayList<>(1); -1657if (des
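The mergeRegions() hunk above validates its inputs before submitting MergeTableRegionsProcedure: exactly two regions, both bound to the same table, and not the same region twice. Those checks condensed; RegionInfo is narrowed to the two accessors used, and the hunk's UnknownRegionException and MergeRegionException are collapsed to IOException.

    import java.io.IOException;

    final class MergePreconditions {
      // Narrowed stand-in for org.apache.hadoop.hbase.client.RegionInfo.
      interface RegionInfo {
        String getTable();
        String getEncodedName();
      }

      static void check(RegionInfo[] regionsToMerge) throws IOException {
        assert regionsToMerge.length == 2;
        if (regionsToMerge[0].getTable() == null || regionsToMerge[1].getTable() == null) {
          throw new IOException("Can't merge regions without table associated");
        }
        if (!regionsToMerge[0].getTable().equals(regionsToMerge[1].getTable())) {
          throw new IOException("Cannot merge regions from two different tables "
              + regionsToMerge[0].getTable() + " and " + regionsToMerge[1].getTable());
        }
        // the hunk compares with RegionInfo.COMPARATOR; encoded names do here
        if (regionsToMerge[0].getEncodedName().equals(regionsToMerge[1].getEncodedName())) {
          throw new IOException("Cannot merge a region to itself "
              + regionsToMerge[0].getEncodedName());
        }
      }
    }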
[17/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/35decbe4/devapidocs/src-html/org/apache/hadoop/hbase/snapshot/ExportSnapshot.ExportSnapshotInputFormat.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/snapshot/ExportSnapshot.ExportSnapshotInputFormat.html b/devapidocs/src-html/org/apache/hadoop/hbase/snapshot/ExportSnapshot.ExportSnapshotInputFormat.html index 87257da..add30e1 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/snapshot/ExportSnapshot.ExportSnapshotInputFormat.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/snapshot/ExportSnapshot.ExportSnapshotInputFormat.html @@ -116,1006 +116,1008 @@ 108 private static final String CONF_BUFFER_SIZE = "snapshot.export.buffer.size"; 109 private static final String CONF_MAP_GROUP = "snapshot.export.default.map.group"; 110 private static final String CONF_BANDWIDTH_MB = "snapshot.export.map.bandwidth.mb"; -111 protected static final String CONF_SKIP_TMP = "snapshot.export.skip.tmp"; -112 -113 static class Testing { -114static final String CONF_TEST_FAILURE = "test.snapshot.export.failure"; -115static final String CONF_TEST_FAILURE_COUNT = "test.snapshot.export.failure.count"; -116int failuresCountToInject = 0; -117int injectedFailureCount = 0; -118 } -119 -120 // Command line options and defaults. -121 static final class Options { -122static final Option SNAPSHOT = new Option(null, "snapshot", true, "Snapshot to restore."); -123static final Option TARGET_NAME = new Option(null, "target", true, -124"Target name for the snapshot."); -125static final Option COPY_TO = new Option(null, "copy-to", true, "Remote " -126+ "destination hdfs://"); -127static final Option COPY_FROM = new Option(null, "copy-from", true, -128"Input folder hdfs:// (default hbase.rootdir)"); -129static final Option NO_CHECKSUM_VERIFY = new Option(null, "no-checksum-verify", false, -130"Do not verify checksum, use name+length only."); -131static final Option NO_TARGET_VERIFY = new Option(null, "no-target-verify", false, -132"Do not verify the integrity of the exported snapshot."); -133static final Option OVERWRITE = new Option(null, "overwrite", false, -134"Rewrite the snapshot manifest if already exists."); -135static final Option CHUSER = new Option(null, "chuser", true, -136"Change the owner of the files to the specified one."); -137static final Option CHGROUP = new Option(null, "chgroup", true, -138"Change the group of the files to the specified one."); -139static final Option CHMOD = new Option(null, "chmod", true, -140"Change the permission of the files to the specified one."); -141static final Option MAPPERS = new Option(null, "mappers", true, -142"Number of mappers to use during the copy (mapreduce.job.maps)."); -143static final Option BANDWIDTH = new Option(null, "bandwidth", true, -144"Limit bandwidth to this value in MB/second."); -145 } -146 -147 // Export Map-Reduce Counters, to keep track of the progress -148 public enum Counter { -149MISSING_FILES, FILES_COPIED, FILES_SKIPPED, COPY_FAILED, -150BYTES_EXPECTED, BYTES_SKIPPED, BYTES_COPIED -151 } -152 -153 private static class ExportMapper extends Mapper{ -155private static final Log LOG = LogFactory.getLog(ExportMapper.class); -156final static int REPORT_SIZE = 1 * 1024 * 1024; -157final static int BUFFER_SIZE = 64 * 1024; -158 -159private boolean verifyChecksum; -160private String filesGroup; -161private String filesUser; -162private short filesMode; -163private int bufferSize; -164 -165private FileSystem outputFs; -166private Path outputArchive; -167private Path outputRoot; 
-168 -169private FileSystem inputFs; -170private Path inputArchive; -171private Path inputRoot; -172 -173private static Testing testing = new Testing(); -174 -175@Override -176public void setup(Context context) throws IOException { -177 Configuration conf = context.getConfiguration(); -178 -179 Configuration srcConf = HBaseConfiguration.createClusterConf(conf, null, CONF_SOURCE_PREFIX); -180 Configuration destConf = HBaseConfiguration.createClusterConf(conf, null, CONF_DEST_PREFIX); -181 -182 verifyChecksum = conf.getBoolean(CONF_CHECKSUM_VERIFY, true); -183 -184 filesGroup = conf.get(CONF_FILES_GROUP); -185 filesUser = conf.get(CONF_FILES_USER); -186 filesMode = (short)conf.getInt(CONF_FILES_MODE, 0); -187 outputRoot = new Path(conf.get(CONF_OUTPUT_ROOT)); -188 inputRoot = new Path(conf.get(CONF_INPUT_ROOT)); -189 -190 inputArchive = new Path(inputRoot, HConstants.HFILE_ARCHIVE_DIRECTORY); -191 outp
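ExportSnapshot implements org.apache.hadoop.util.Tool, so the Options listed above map directly onto command-line flags. A minimal ToolRunner driver exercising a few of them; the snapshot name and destination URI are illustrative placeholders.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    // Drives the ExportSnapshot tool whose Options are defined in the hunk above.
    public final class ExportSnapshotDriver {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "--snapshot", "my-snapshot",        // Options.SNAPSHOT
            "--copy-to", "hdfs://backup/hbase", // Options.COPY_TO
            "--mappers", "16",                  // Options.MAPPERS (mapreduce.job.maps)
            "--no-checksum-verify"              // compare name+length only
        });
        System.exit(rc);
      }
    }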
[48/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/35decbe4/checkstyle-aggregate.html -- diff --git a/checkstyle-aggregate.html b/checkstyle-aggregate.html index 4e3a251..d716e00 100644 --- a/checkstyle-aggregate.html +++ b/checkstyle-aggregate.html @@ -7,7 +7,7 @@ - + Apache HBase – Checkstyle Results @@ -289,7 +289,7 @@ 3506 0 0 -22097 +22096 Files @@ -8252,7 +8252,7 @@ org/apache/hadoop/hbase/regionserver/ChunkCreator.java 0 0 -10 +9 org/apache/hadoop/hbase/regionserver/CompactSplit.java 0 @@ -10527,7 +10527,7 @@ org/apache/hadoop/hbase/replication/regionserver/TestReplicator.java 0 0 -6 +7 org/apache/hadoop/hbase/replication/regionserver/TestWALEntryStream.java 0 @@ -10957,7 +10957,7 @@ org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java 0 0 -4 +3 org/apache/hadoop/hbase/rsgroup/RSGroupInfo.java 0 @@ -13666,12 +13666,12 @@ http://checkstyle.sourceforge.net/config_javadoc.html#JavadocTagContinuationIndentation";>JavadocTagContinuationIndentation offset: "2" -866 +847 Error http://checkstyle.sourceforge.net/config_javadoc.html#NonEmptyAtclauseDescription";>NonEmptyAtclauseDescription -4453 +4471 Error misc @@ -18651,7 +18651,7 @@ Error javadoc -JavadocTagContinuationIndentation +NonEmptyAtclauseDescription Javadoc comment at column 26 has parse error. Missed HTML close tag 'arg'. Sometimes it means that close tag missed for one of previous tags. 43 @@ -19326,7 +19326,7 @@ Error javadoc -JavadocTagContinuationIndentation +NonEmptyAtclauseDescription Javadoc comment at column 4 has parse error. Missed HTML close tag 'pre'. Sometimes it means that close tag missed for one of previous tags. 59 @@ -21561,7 +21561,7 @@ Error javadoc -JavadocTagContinuationIndentation +NonEmptyAtclauseDescription Javadoc comment at column 19 has parse error. Details: no viable alternative at input '\n * List
[31/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/35decbe4/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/ChunkCreator.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/ChunkCreator.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/ChunkCreator.html
index faa5693..157ecf3 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/ChunkCreator.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/ChunkCreator.html
@@ -118,338 +118,375 @@
[26/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/35decbe4/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.RegionScannerShippedCallBack.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.RegionScannerShippedCallBack.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.RegionScannerShippedCallBack.html
index 9761535..a4d2378 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.RegionScannerShippedCallBack.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.RegionScannerShippedCallBack.html
@@ -1145,2347 +1145,2348 @@
[50/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/35decbe4/apidocs/src-html/org/apache/hadoop/hbase/snapshot/ExportSnapshot.html
--
diff --git a/apidocs/src-html/org/apache/hadoop/hbase/snapshot/ExportSnapshot.html b/apidocs/src-html/org/apache/hadoop/hbase/snapshot/ExportSnapshot.html
index 87257da..add30e1 100644
--- a/apidocs/src-html/org/apache/hadoop/hbase/snapshot/ExportSnapshot.html
+++ b/apidocs/src-html/org/apache/hadoop/hbase/snapshot/ExportSnapshot.html
@@ -116,1006 +116,1008 @@
  private static final String CONF_BUFFER_SIZE = "snapshot.export.buffer.size";
  private static final String CONF_MAP_GROUP = "snapshot.export.default.map.group";
  private static final String CONF_BANDWIDTH_MB = "snapshot.export.map.bandwidth.mb";
  protected static final String CONF_SKIP_TMP = "snapshot.export.skip.tmp";

  static class Testing {
    static final String CONF_TEST_FAILURE = "test.snapshot.export.failure";
    static final String CONF_TEST_FAILURE_COUNT = "test.snapshot.export.failure.count";
    int failuresCountToInject = 0;
    int injectedFailureCount = 0;
  }

  // Command line options and defaults.
  static final class Options {
    static final Option SNAPSHOT = new Option(null, "snapshot", true, "Snapshot to restore.");
    static final Option TARGET_NAME = new Option(null, "target", true,
        "Target name for the snapshot.");
    static final Option COPY_TO = new Option(null, "copy-to", true, "Remote "
        + "destination hdfs://");
    static final Option COPY_FROM = new Option(null, "copy-from", true,
        "Input folder hdfs:// (default hbase.rootdir)");
    static final Option NO_CHECKSUM_VERIFY = new Option(null, "no-checksum-verify", false,
        "Do not verify checksum, use name+length only.");
    static final Option NO_TARGET_VERIFY = new Option(null, "no-target-verify", false,
        "Do not verify the integrity of the exported snapshot.");
    static final Option OVERWRITE = new Option(null, "overwrite", false,
        "Rewrite the snapshot manifest if already exists.");
    static final Option CHUSER = new Option(null, "chuser", true,
        "Change the owner of the files to the specified one.");
    static final Option CHGROUP = new Option(null, "chgroup", true,
        "Change the group of the files to the specified one.");
    static final Option CHMOD = new Option(null, "chmod", true,
        "Change the permission of the files to the specified one.");
    static final Option MAPPERS = new Option(null, "mappers", true,
        "Number of mappers to use during the copy (mapreduce.job.maps).");
    static final Option BANDWIDTH = new Option(null, "bandwidth", true,
        "Limit bandwidth to this value in MB/second.");
  }

  // Export Map-Reduce Counters, to keep track of the progress
  public enum Counter {
    MISSING_FILES, FILES_COPIED, FILES_SKIPPED, COPY_FAILED,
    BYTES_EXPECTED, BYTES_SKIPPED, BYTES_COPIED
  }

  private static class ExportMapper extends Mapper<...> {
    private static final Log LOG = LogFactory.getLog(ExportMapper.class);
    final static int REPORT_SIZE = 1 * 1024 * 1024;
    final static int BUFFER_SIZE = 64 * 1024;

    private boolean verifyChecksum;
    private String filesGroup;
    private String filesUser;
    private short filesMode;
    private int bufferSize;

    private FileSystem outputFs;
    private Path outputArchive;
    private Path outputRoot;

    private FileSystem inputFs;
    private Path inputArchive;
    private Path inputRoot;

    private static Testing testing = new Testing();

    @Override
    public void setup(Context context) throws IOException {
      Configuration conf = context.getConfiguration();

      Configuration srcConf = HBaseConfiguration.createClusterConf(conf, null, CONF_SOURCE_PREFIX);
      Configuration destConf = HBaseConfiguration.createClusterConf(conf, null, CONF_DEST_PREFIX);

      verifyChecksum = conf.getBoolean(CONF_CHECKSUM_VERIFY, true);

      filesGroup = conf.get(CONF_FILES_GROUP);
      filesUser = conf.get(CONF_FILES_USER);
      filesMode = (short) conf.getInt(CONF_FILES_MODE, 0);
      outputRoot = new Path(conf.get(CONF_OUTPUT_ROOT));
      inputRoot = new Path(conf.get(CONF_INPUT_ROOT));

      inputArchive = new Path(inputRoot, HConstants.HFILE_ARCHIVE_DIRECTORY);
      outputArchive = new Path(outputRoot, HConstants.HFILE_ARCHIVE_DIRECTORY);

      try {
        srcConf.setBoolean("fs." + inputRoot.toUri(
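The Options class above wires long-only flags through the four-argument commons-cli Option constructor (short name, long name, has-argument, description), passing null as the short name. A hedged, self-contained sketch of parsing two analogous flags (this mirrors the style of the listing, not the full ExportSnapshot option set; DefaultParser assumes commons-cli 1.3+):

import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.DefaultParser;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;

public final class OptionsSketch {
  public static void main(String[] args) throws ParseException {
    // null short name => long-only flag, matching the listing above.
    Option snapshot = new Option(null, "snapshot", true, "Snapshot to restore.");
    Option overwrite = new Option(null, "overwrite", false,
        "Rewrite the snapshot manifest if already exists.");

    Options options = new Options();
    options.addOption(snapshot);
    options.addOption(overwrite);

    CommandLine cmd = new DefaultParser().parse(options,
        new String[] { "--snapshot", "snap1", "--overwrite" });
    System.out.println("snapshot=" + cmd.getOptionValue("snapshot")
        + " overwrite=" + cmd.hasOption("overwrite"));
  }
}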
[16/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/35decbe4/devapidocs/src-html/org/apache/hadoop/hbase/snapshot/ExportSnapshot.Options.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/snapshot/ExportSnapshot.Options.html b/devapidocs/src-html/org/apache/hadoop/hbase/snapshot/ExportSnapshot.Options.html
index 87257da..add30e1 100644
(same regenerated ExportSnapshot source listing as the apidocs diff above)
[11/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/35decbe4/hbase-shaded-check-invariants/mail-lists.html
--
Generated-site date-stamp refresh only, repeated across the hbase-shaded-check-invariants
pages (mail-lists.html, plugin-management.html, plugins.html, project-info.html,
project-reports.html, project-summary.html):

-      Last Published: 2017-10-29
+      Last Published: 2017-10-31
[19/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/35decbe4/devapidocs/src-html/org/apache/hadoop/hbase/snapshot/ExportSnapshot.ExportSnapshotInputFormat.ExportSnapshotInputSplit.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/snapshot/ExportSnapshot.ExportSnapshotInputFormat.ExportSnapshotInputSplit.html b/devapidocs/src-html/org/apache/hadoop/hbase/snapshot/ExportSnapshot.ExportSnapshotInputFormat.ExportSnapshotInputSplit.html
index 87257da..add30e1 100644
(same regenerated ExportSnapshot source listing as the apidocs diff above)
[13/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/35decbe4/export_control.html
--
Generated-site date-stamp refresh only, repeated across export_control.html and the
hbase-annotations pages (checkstyle.html, dependencies.html, dependency-convergence.html,
dependency-info.html, dependency-management.html, index.html):

-      Last Published: 2017-10-29
+      Last Published: 2017-10-31
[03/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/35decbe4/testdevapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/TestReplicator.FailureInjectingReplicationEndpointForTest.FailureInjectingReplicatorForTest.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/TestReplicator.FailureInjectingReplicationEndpointForTest.FailureInjectingReplicatorForTest.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/TestReplicator.FailureInjectingReplicationEndpointForTest.FailureInjectingReplicatorForTest.html
index c97fa47..3b16d00 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/TestReplicator.FailureInjectingReplicationEndpointForTest.FailureInjectingReplicatorForTest.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/TestReplicator.FailureInjectingReplicationEndpointForTest.FailureInjectingReplicatorForTest.html
@@ -56,379 +56,381 @@
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
+import org.junit.Ignore;
 import org.junit.experimental.categories.Category;

@Category(MediumTests.class)
public class TestReplicator extends TestReplicationBase {

  static final Log LOG = LogFactory.getLog(TestReplicator.class);
  static final int NUM_ROWS = 10;

  @BeforeClass
  public static void setUpBeforeClass() throws Exception {
    // Set RPC size limit to 10kb (will be applied to both source and sink clusters)
    conf1.setInt(RpcServer.MAX_REQUEST_SIZE, 1024 * 10);
    TestReplicationBase.setUpBeforeClass();
    admin.removePeer("2"); // Remove the peer set up for us by base class
  }

  @Test
  public void testReplicatorBatching() throws Exception {
    // Clear the tables
    truncateTable(utility1, tableName);
    truncateTable(utility2, tableName);

    // Replace the peer set up for us by the base class with a wrapper for this test
    admin.addPeer("testReplicatorBatching",
        new ReplicationPeerConfig().setClusterKey(utility2.getClusterKey())
            .setReplicationEndpointImpl(ReplicationEndpointForTest.class.getName()), null);

    ReplicationEndpointForTest.setBatchCount(0);
    ReplicationEndpointForTest.setEntriesCount(0);
    try {
      ReplicationEndpointForTest.pause();
      try {
        // Queue up a bunch of cells of size 8K. Because of RPC size limits, they will all
        // have to be replicated separately.
        final byte[] valueBytes = new byte[8 * 1024];
        for (int i = 0; i < NUM_ROWS; i++) {
          htable1.put(new Put(("row" + Integer.toString(i)).getBytes())
              .addColumn(famName, null, valueBytes));
        }
      } finally {
        ReplicationEndpointForTest.resume();
      }

      // Wait for replication to complete.
      Waiter.waitFor(conf1, 6, new Waiter.ExplainingPredicate<Exception>() {
        @Override
        public boolean evaluate() throws Exception {
          return ReplicationEndpointForTest.getBatchCount() >= NUM_ROWS;
        }

        @Override
        public String explainFailure() throws Exception {
          return "We waited too long for expected replication of " + NUM_ROWS + " entries";
        }
      });

      assertEquals("We sent an incorrect number of batches", NUM_ROWS,
          ReplicationEndpointForTest.getBatchCount());
      assertEquals("We did not replicate enough rows", NUM_ROWS,
          utility2.countRows(htable2));
    } finally {
      admin.removePeer("testReplicatorBatching");
    }
  }

  @Test
  public void testReplicatorWithErrors() throws Exception {
    // Clear the tables
    truncateTable(utility1, tableName);
    truncateTable(utility2, tableName);

    // Replace the peer set up for us by the base class with a wrapper for this test
    admin.addPeer("testReplicatorWithErrors",
        new ReplicationPeerConfig().setClusterKey(utility2.getClusterKey())
            .setReplicationEndpointImpl(FailureInjectingReplicationEndpointForTest.class.getName()),
        null);

    FailureInjectingReplicationEndpointForTest.setBatchCount(0);
    FailureInjectingReplicationEndpointForTest.setEntriesCount(0);
    try {
      FailureInjectingReplicationEndpointForTest.pause();
      try {
        // Queue up a bunch of cells of size 8K. Because of RPC size limits, they will all
        // have to be replicated separately.
        final byte[] valueBytes = new byte[8 * 1024];
        for (int i = 0; i < NUM
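testReplicatorBatching above polls with Waiter.waitFor and an ExplainingPredicate so that a timeout produces a human-readable reason instead of a bare assertion failure. A minimal self-contained sketch of that pattern (hand-rolled interface and waiter, not the HBase Waiter API):

public final class WaitUntil {
  /** Hypothetical stand-in for HBase's ExplainingPredicate. */
  interface ExplainingPredicate {
    boolean evaluate() throws Exception;
    String explainFailure() throws Exception;
  }

  /** Polls until the predicate holds or timeoutMs elapses. */
  static void waitFor(long timeoutMs, ExplainingPredicate predicate) throws Exception {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (System.currentTimeMillis() < deadline) {
      if (predicate.evaluate()) {
        return;
      }
      Thread.sleep(100); // poll interval
    }
    // On timeout, surface the predicate's own explanation.
    throw new AssertionError(predicate.explainFailure());
  }

  public static void main(String[] args) throws Exception {
    final long start = System.currentTimeMillis();
    waitFor(1000, new ExplainingPredicate() {
      @Override
      public boolean evaluate() {
        return System.currentTimeMillis() - start > 200; // true after ~200ms
      }

      @Override
      public String explainFailure() {
        return "condition did not become true within the timeout";
      }
    });
    System.out.println("condition met");
  }
}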
[04/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/35decbe4/testdevapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/TestReplicator.FailureInjectingReplicationEndpointForTest.FailureInjectingBlockingInterface.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/TestReplicator.FailureInjectingReplicationEndpointForTest.FailureInjectingBlockingInterface.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/TestReplicator.FailureInjectingReplicationEndpointForTest.FailureInjectingBlockingInterface.html
index c97fa47..3b16d00 100644
(same regenerated TestReplicator source listing as the diff above)
[05/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/35decbe4/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestCompactingToCellFlatMapMemStore.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestCompactingToCellFlatMapMemStore.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestCompactingToCellFlatMapMemStore.html
index d28cf30..c97c8f0 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestCompactingToCellFlatMapMemStore.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestCompactingToCellFlatMapMemStore.html
@@ -102,7 +102,7 @@
     }

     // test 1 bucket
-    long totalCellsLen = addRowsByKeys(memstore, keys1);
+    long totalCellsLen = addRowsByKeysDataSize(memstore, keys1);
     long cellBeforeFlushSize = cellBeforeFlushSize();
     long cellAfterFlushSize = cellAfterFlushSize();
     long totalHeapSize = MutableSegment.DEEP_OVERHEAD + 4 * cellBeforeFlushSize;
@@ -148,7 +148,7 @@
     String[] keys1 = { "A", "A", "B", "C" };
     String[] keys2 = { "A", "B", "D" };

-    long totalCellsLen1 = addRowsByKeys(memstore, keys1); // INSERT 4
+    long totalCellsLen1 = addRowsByKeysDataSize(memstore, keys1); // INSERT 4
     long cellBeforeFlushSize = cellBeforeFlushSize();
     long cellAfterFlushSize = cellAfterFlushSize();
     long totalHeapSize1 = MutableSegment.DEEP_OVERHEAD + 4 * cellBeforeFlushSize;
@@ -172,7 +172,7 @@
     assertEquals(totalCellsLen1, regionServicesForStores.getMemStoreSize());
     assertEquals(totalHeapSize1, ((CompactingMemStore) memstore).heapSize());

-    long totalCellsLen2 = addRowsByKeys(memstore, keys2); // INSERT 3 (3+3=6)
+    long totalCellsLen2 = addRowsByKeysDataSize(memstore, keys2); // INSERT 3 (3+3=6)
     long totalHeapSize2 = 3 * cellBeforeFlushSize;
     assertEquals(totalCellsLen1 + totalCellsLen2, regionServicesForStores.getMemStoreSize());
     assertEquals(totalHeapSize1 + totalHeapSize2, ((CompactingMemStore) memstore).heapSize());
@@ -210,7 +210,7 @@
     String[] keys2 = { "A", "B", "D" };
     String[] keys3 = { "D", "B", "B" };

-    long totalCellsLen1 = addRowsByKeys(memstore, keys1);
+    long totalCellsLen1 = addRowsByKeysDataSize(memstore, keys1);
     long cellBeforeFlushSize = cellBeforeFlushSize();
     long cellAfterFlushSize = cellAfterFlushSize();
     long totalHeapSize1 = MutableSegment.DEEP_OVERHEAD + 4 * cellBeforeFlushSize;
@@ -231,7 +231,7 @@
     assertEquals(totalCellsLen1, regionServicesForStores.getMemStoreSize());
     assertEquals(totalHeapSize1, ((CompactingMemStore) memstore).heapSize());

-    long totalCellsLen2 = addRowsByKeys(memstore, keys2);
+    long totalCellsLen2 = addRowsByKeysDataSize(memstore, keys2);
     long totalHeapSize2 = 3 * cellBeforeFlushSize;

     assertEquals(totalCellsLen1 + totalCellsLen2, regionServicesForStores.getMemStoreSize());
@@ -245,7 +245,7 @@
     assertEquals(totalCellsLen1 + totalCellsLen2, regionServicesForStores.getMemStoreSize());
     assertEquals(totalHeapSize1 + totalHeapSize2, ((CompactingMemStore) memstore).heapSize());

-    long totalCellsLen3 = addRowsByKeys(memstore, keys3);
+    long totalCellsLen3 = addRowsByKeysDataSize(memstore, keys3);
     long totalHeapSize3 = 3 * cellBeforeFlushSize;
     assertEquals(totalCellsLen1 + totalCellsLen2 + totalCellsLen3,
         regionServicesForStores.getMemStoreSize());
@@ -302,7 +302,7 @@
     memstore.getConfiguration().set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
         String.valueOf(compactionType));
     ((CompactingMemStore) memstore).initiateType(compactionType);
-    addRowsByKeys(memstore, keys1);
+    addRowsByKeysDataSize(memstore, keys1);

     ((CompactingMemStore) memstore).flushInMemory(); // push keys to pipeline should not compact

@@ -311,7 +311,7 @@
     }
     assertEquals(0, memstore.getSnapshot().getCellsCount());

-    addRowsByKeys(memstore, keys2); // also should only flatten
+    addRowsByKeysDataSize(memstore, keys2); // also should only flatten

     int counter2 = 0;
     for ( Segment s : memstore.getSegments()) {
@@ -330,7 +330,7 @@
     }
     assertEquals(12, counter3);

-    addRowsByKeys(memstore, keys3);
+    addRowsByKeysDataSize(memstore, keys3);

     int counter4 = 0;
     for ( Segment s : memstore.getSegments()) {
@@ -612,49 +612,104 @@
   }


-  private long addRowsByKeys(final AbstractMemStore hmc, String[] keys) {
-    byte[] fam = Bytes.toBytes("testfamily");
-    byte[] qf = Bytes.toBytes("testqualifier");
-    MemStoreSizing memstoreSizing = new MemStoreSizing();
-    for (int i =
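The rename to addRowsByKeysDataSize reflects the helper's contract: it returns the data-size delta produced by the inserted cells, which the assertions compare against the region's memstore accounting. A rough self-contained sketch of a helper with that contract (simplified sizing model, not HBase's MemStoreSizing arithmetic):

import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;

public final class AddRowsSketch {
  /** Simplified cell: row/family/qualifier/value byte arrays. */
  record Cell(byte[] row, byte[] fam, byte[] qf, byte[] value) {
    long dataSize() { return row.length + fam.length + qf.length + value.length; }
  }

  static final List<Cell> STORE = new ArrayList<>();

  /** Adds one cell per key and returns the total data size added. */
  static long addRowsByKeysDataSize(String[] keys) {
    byte[] fam = "testfamily".getBytes(StandardCharsets.UTF_8);
    byte[] qf = "testqualifier".getBytes(StandardCharsets.UTF_8);
    long added = 0;
    for (String key : keys) {
      Cell c = new Cell(key.getBytes(StandardCharsets.UTF_8), fam, qf, new byte[0]);
      STORE.add(c);
      added += c.dataSize(); // accumulate the delta, mirroring MemStoreSizing's role
    }
    return added;
  }

  public static void main(String[] args) {
    long delta = addRowsByKeysDataSize(new String[] { "A", "A", "B", "C" });
    System.out.println("data size added: " + delta);
  }
}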
hbase git commit: HBASE-19130 Typo in HStore.initializeRegionInternals for replaying wal
Repository: hbase Updated Branches: refs/heads/branch-2 20b7120af -> 9dfd77595

HBASE-19130 Typo in HStore.initializeRegionInternals for replaying wal

Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/9dfd7759
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/9dfd7759
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/9dfd7759

Branch: refs/heads/branch-2
Commit: 9dfd77595ffde5f65aebf52fd29ec46de52e01f8
Parents: 20b7120
Author: zhangduo
Authored: Tue Oct 31 16:46:45 2017 +0800
Committer: zhangduo
Committed: Tue Oct 31 21:21:57 2017 +0800
--
 .../main/java/org/apache/hadoop/hbase/regionserver/HRegion.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--

http://git-wip-us.apache.org/repos/asf/hbase/blob/9dfd7759/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 648a415..bc11bce 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -920,7 +920,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
       this.mvcc.advanceTo(maxSeqId);
     } finally {
       // update the stores that we are done replaying
-      stores.forEach(HStore::startReplayingFromWAL);
+      stores.forEach(HStore::stopReplayingFromWAL);
     }
   }
   this.lastReplayedOpenRegionSeqId = maxSeqId;
hbase git commit: HBASE-19130 Typo in HStore.initializeRegionInternals for replaying wal
Repository: hbase Updated Branches: refs/heads/master 2f29bbb37 -> bbb7e1924

HBASE-19130 Typo in HStore.initializeRegionInternals for replaying wal

Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/bbb7e192
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/bbb7e192
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/bbb7e192

Branch: refs/heads/master
Commit: bbb7e1924db2300ec95fd3beb8d252049dbcf826
Parents: 2f29bbb
Author: zhangduo
Authored: Tue Oct 31 16:46:45 2017 +0800
Committer: zhangduo
Committed: Tue Oct 31 21:21:52 2017 +0800
--
 .../main/java/org/apache/hadoop/hbase/regionserver/HRegion.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--

http://git-wip-us.apache.org/repos/asf/hbase/blob/bbb7e192/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 4d0f6d0..5f53fff 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -920,7 +920,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
       this.mvcc.advanceTo(maxSeqId);
     } finally {
       // update the stores that we are done replaying
-      stores.forEach(HStore::startReplayingFromWAL);
+      stores.forEach(HStore::stopReplayingFromWAL);
     }
   }
   this.lastReplayedOpenRegionSeqId = maxSeqId;
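The fix swaps a copy-pasted startReplayingFromWAL for the stopReplayingFromWAL the finally block was meant to call: replay mode is switched on before recovered edits are applied and must be switched off once replay finishes, or the stores stay stuck in replay mode. A minimal sketch of that enable/replay/disable shape (hypothetical Store stand-in, not the HStore API):

import java.util.List;

public final class ReplayToggleSketch {
  /** Hypothetical stand-in for a store that tracks WAL-replay mode. */
  static final class Store {
    private boolean replayingFromWAL;

    void startReplayingFromWAL() { replayingFromWAL = true; }
    void stopReplayingFromWAL() { replayingFromWAL = false; }
    boolean isReplaying() { return replayingFromWAL; }
  }

  static void replayRecoveredEdits(List<Store> stores) {
    stores.forEach(Store::startReplayingFromWAL);
    try {
      // ... apply recovered WAL edits to each store ...
    } finally {
      // The HBASE-19130 bug called startReplayingFromWAL here again,
      // leaving every store stuck in replay mode.
      stores.forEach(Store::stopReplayingFromWAL);
    }
  }

  public static void main(String[] args) {
    List<Store> stores = List.of(new Store(), new Store());
    replayRecoveredEdits(stores);
    System.out.println("still replaying? " + stores.get(0).isReplaying()); // false
  }
}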
hbase git commit: HBASE-18232: Support Jumbo Chunks for CellChunkMap
Repository: hbase Updated Branches: refs/heads/master 5000652e5 -> 2f29bbb37

HBASE-18232: Support Jumbo Chunks for CellChunkMap

Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/2f29bbb3
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/2f29bbb3
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/2f29bbb3

Branch: refs/heads/master
Commit: 2f29bbb373877bb69b0b36f5e59702711da39ca9
Parents: 5000652
Author: anastas
Authored: Tue Oct 31 14:42:52 2017 +0200
Committer: anastas
Committed: Tue Oct 31 14:42:52 2017 +0200
--
 .../apache/hadoop/hbase/regionserver/Chunk.java     |  4 +
 .../hadoop/hbase/regionserver/ChunkCreator.java     | 69 +
 .../regionserver/ImmutableMemStoreLAB.java          | 25 ++-
 .../hadoop/hbase/regionserver/MemStoreLAB.java      | 22 +-
 .../hbase/regionserver/MemStoreLABImpl.java         | 20 +
 .../hbase/regionserver/TestCellFlatSet.java         | 78
 .../regionserver/TestCompactingMemStore.java        | 57 +-
 .../TestCompactingToCellFlatMapMemStore.java        | 75 ---
 8 files changed, 248 insertions(+), 102 deletions(-)
--

http://git-wip-us.apache.org/repos/asf/hbase/blob/2f29bbb3/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Chunk.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Chunk.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Chunk.java
index 61cd591..6c3f471 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Chunk.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Chunk.java
@@ -85,6 +85,10 @@ public abstract class Chunk {
     return this.fromPool;
   }

+  boolean isJumbo() {
+    return size > ChunkCreator.getInstance().getChunkSize();
+  }
+
   /**
    * Actually claim the memory for this chunk. This should only be called from the thread that
    * constructed the chunk. It is thread-safe against other threads calling alloc(), who will block

http://git-wip-us.apache.org/repos/asf/hbase/blob/2f29bbb3/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ChunkCreator.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ChunkCreator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ChunkCreator.java
index 4dd1207..faf517b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ChunkCreator.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ChunkCreator.java
@@ -110,17 +110,27 @@ public class ChunkCreator {
    * @return the chunk that was initialized
    */
   Chunk getChunk() {
-    return getChunk(CompactingMemStore.IndexType.ARRAY_MAP);
+    return getChunk(CompactingMemStore.IndexType.ARRAY_MAP, chunkSize);
+  }
+
+  /**
+   * Creates and inits a chunk. The default implementation for specific index type.
+   * @return the chunk that was initialized
+   */
+  Chunk getChunk(CompactingMemStore.IndexType chunkIndexType) {
+    return getChunk(chunkIndexType, chunkSize);
   }

   /**
    * Creates and inits a chunk.
    * @return the chunk that was initialized
    * @param chunkIndexType whether the requested chunk is going to be used with CellChunkMap index
+   * @param size the size of the chunk to be allocated, in bytes
    */
-  Chunk getChunk(CompactingMemStore.IndexType chunkIndexType) {
+  Chunk getChunk(CompactingMemStore.IndexType chunkIndexType, int size) {
     Chunk chunk = null;
-    if (pool != null) {
+    // if we have pool and this is not jumbo chunk (when size != chunkSize this is jumbo chunk)
+    if ((pool != null) && (size == chunkSize)) {
       // the pool creates the chunk internally. The chunk#init() call happens here
       chunk = this.pool.getChunk();
       // the pool has run out of maxCount
@@ -132,9 +142,9 @@ public class ChunkCreator {
       }
     }
     if (chunk == null) {
-      // the second boolean parameter means:
-      // if CellChunkMap index is requested, put allocated on demand chunk mapping into chunkIdMap
-      chunk = createChunk(false, chunkIndexType);
+      // the second parameter explains whether CellChunkMap index is requested,
+      // in that case, put allocated on demand chunk mapping into chunkIdMap
+      chunk = createChunk(false, chunkIndexType, size);
     }

     // now we need to actually do the expensive memory allocation step in case of a new chunk,
@@ -144,20 +154,37 @@ public class ChunkCreator {
   }

   /**
+   * Creates and inits a chunk of a special size, bigger than a regular chunk size.
+   * Such a chunk will never come from pool and will always be
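The key decision in getChunk(chunkIndexType, size) above: pooled chunks are fixed-size, so any request larger than chunkSize (a "jumbo" chunk) must bypass the pool and be allocated on demand. A compact sketch of that dispatch (hypothetical pool and chunk types, not the ChunkCreator internals):

import java.nio.ByteBuffer;
import java.util.ArrayDeque;
import java.util.Deque;

public final class JumboChunkSketch {
  static final int CHUNK_SIZE = 2 * 1024 * 1024; // regular pooled chunk size

  /** Hypothetical fixed-size chunk pool. */
  static final class Pool {
    private final Deque<ByteBuffer> free = new ArrayDeque<>();

    ByteBuffer get() {
      ByteBuffer b = free.poll();
      return b != null ? b : ByteBuffer.allocate(CHUNK_SIZE);
    }

    void put(ByteBuffer b) {
      b.clear();
      free.push(b);
    }
  }

  static final Pool POOL = new Pool();

  /** Pool only serves regular-size chunks; jumbo requests are allocated on demand. */
  static ByteBuffer getChunk(int size) {
    if (size == CHUNK_SIZE) {
      return POOL.get();              // reusable, pooled
    }
    return ByteBuffer.allocate(size); // jumbo: never pooled, reclaimed by GC
  }

  public static void main(String[] args) {
    System.out.println("regular: " + getChunk(CHUNK_SIZE).capacity());
    System.out.println("jumbo:   " + getChunk(3 * 1024 * 1024).capacity());
  }
}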