[2/2] hbase git commit: HBASE-19781 Add a new cluster state flag for synchronous replication
HBASE-19781 Add a new cluster state flag for synchronous replication Project: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/7a1b7170 Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/7a1b7170 Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/7a1b7170 Branch: refs/heads/HBASE-19064 Commit: 7a1b7170527f7fd13c91a9f1ed2b716779a544c4 Parents: b56bf0d Author: Guanghao ZhangAuthored: Mon Jan 22 11:44:49 2018 +0800 Committer: Guanghao Zhang Committed: Wed Jan 24 13:35:52 2018 +0800 -- .../org/apache/hadoop/hbase/client/Admin.java | 39 + .../apache/hadoop/hbase/client/AsyncAdmin.java | 31 .../hadoop/hbase/client/AsyncHBaseAdmin.java| 7 + .../hbase/client/ConnectionImplementation.java | 9 ++ .../apache/hadoop/hbase/client/HBaseAdmin.java | 26 +++ .../hadoop/hbase/client/RawAsyncHBaseAdmin.java | 15 ++ .../client/ShortCircuitMasterConnection.java| 9 ++ .../replication/ReplicationPeerConfigUtil.java | 26 +-- .../replication/ReplicationPeerDescription.java | 10 +- .../hbase/replication/SyncReplicationState.java | 48 ++ .../hbase/shaded/protobuf/RequestConverter.java | 10 ++ .../src/main/protobuf/Master.proto | 4 + .../src/main/protobuf/MasterProcedure.proto | 6 +- .../src/main/protobuf/Replication.proto | 20 +++ .../replication/ReplicationPeerStorage.java | 18 ++- .../hbase/replication/ReplicationUtils.java | 1 + .../replication/ZKReplicationPeerStorage.java | 60 +-- .../replication/TestReplicationStateBasic.java | 23 ++- .../TestZKReplicationPeerStorage.java | 12 +- .../hbase/coprocessor/MasterObserver.java | 23 +++ .../org/apache/hadoop/hbase/master/HMaster.java | 12 ++ .../hbase/master/MasterCoprocessorHost.java | 21 +++ .../hadoop/hbase/master/MasterRpcServices.java | 17 ++ .../hadoop/hbase/master/MasterServices.java | 9 ++ .../procedure/PeerProcedureInterface.java | 2 +- .../replication/ReplicationPeerManager.java | 51 +- ...ransitPeerSyncReplicationStateProcedure.java | 159 +++ 
.../hbase/security/access/AccessController.java | 8 + .../replication/TestReplicationAdmin.java | 62 .../hbase/master/MockNoopMasterServices.java| 11 +- .../cleaner/TestReplicationHFileCleaner.java| 4 +- .../TestReplicationTrackerZKImpl.java | 6 +- .../TestReplicationSourceManager.java | 3 +- .../security/access/TestAccessController.java | 16 ++ .../hbase/util/TestHBaseFsckReplication.java| 5 +- .../src/main/ruby/hbase/replication_admin.rb| 15 ++ hbase-shell/src/main/ruby/shell.rb | 1 + .../src/main/ruby/shell/commands/list_peers.rb | 6 +- .../transit_peer_sync_replication_state.rb | 44 + .../test/ruby/hbase/replication_admin_test.rb | 24 +++ 40 files changed, 818 insertions(+), 55 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/7a1b7170/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java -- diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java index 6729473..a1a4f3f 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java @@ -52,6 +52,7 @@ import org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException; import org.apache.hadoop.hbase.replication.ReplicationException; import org.apache.hadoop.hbase.replication.ReplicationPeerConfig; import org.apache.hadoop.hbase.replication.ReplicationPeerDescription; +import org.apache.hadoop.hbase.replication.SyncReplicationState; import org.apache.hadoop.hbase.snapshot.HBaseSnapshotException; import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException; import org.apache.hadoop.hbase.snapshot.SnapshotCreationException; @@ -2641,6 +2642,44 @@ public interface Admin extends Abortable, Closeable { List listReplicationPeers(Pattern pattern) throws IOException; /** + * Transit current cluster to a new state in a synchronous replication peer. 
+ * @param peerId a short name that identifies the peer + * @param state a new state of current cluster + * @throws IOException if a remote or network exception occurs + */ + void transitReplicationPeerSyncReplicationState(String peerId, SyncReplicationState state) + throws IOException; + + /** + * Transit current cluster to a new state in a synchronous replication peer. But does not block + * and wait for it. + * + * You can use Future.get(long, TimeUnit) to wait on the
[1/2] hbase git commit: HBASE-19781 Add a new cluster state flag for synchronous replication
Repository: hbase Updated Branches: refs/heads/HBASE-19064 b56bf0dbb -> 7a1b71705 http://git-wip-us.apache.org/repos/asf/hbase/blob/7a1b7170/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckReplication.java -- diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckReplication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckReplication.java index e64255c..99b9a04 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckReplication.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckReplication.java @@ -27,6 +27,7 @@ import org.apache.hadoop.hbase.replication.ReplicationPeerConfig; import org.apache.hadoop.hbase.replication.ReplicationPeerStorage; import org.apache.hadoop.hbase.replication.ReplicationQueueStorage; import org.apache.hadoop.hbase.replication.ReplicationStorageFactory; +import org.apache.hadoop.hbase.replication.SyncReplicationState; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.ReplicationTests; import org.apache.hadoop.hbase.util.HBaseFsck.ErrorReporter.ERROR_CODE; @@ -61,9 +62,9 @@ public class TestHBaseFsckReplication { String peerId1 = "1"; String peerId2 = "2"; peerStorage.addPeer(peerId1, ReplicationPeerConfig.newBuilder().setClusterKey("key").build(), - true); + true, SyncReplicationState.NONE); peerStorage.addPeer(peerId2, ReplicationPeerConfig.newBuilder().setClusterKey("key").build(), - true); + true, SyncReplicationState.NONE); for (int i = 0; i < 10; i++) { queueStorage.addWAL(ServerName.valueOf("localhost", 1 + i, 10 + i), peerId1, "file-" + i); http://git-wip-us.apache.org/repos/asf/hbase/blob/7a1b7170/hbase-shell/src/main/ruby/hbase/replication_admin.rb -- diff --git a/hbase-shell/src/main/ruby/hbase/replication_admin.rb b/hbase-shell/src/main/ruby/hbase/replication_admin.rb index ba7d191..d5d4844 100644 --- 
a/hbase-shell/src/main/ruby/hbase/replication_admin.rb +++ b/hbase-shell/src/main/ruby/hbase/replication_admin.rb @@ -20,6 +20,7 @@ include Java java_import org.apache.hadoop.hbase.client.replication.ReplicationPeerConfigUtil +java_import org.apache.hadoop.hbase.replication.SyncReplicationState java_import org.apache.hadoop.hbase.replication.ReplicationPeerConfig java_import org.apache.hadoop.hbase.util.Bytes java_import org.apache.hadoop.hbase.zookeeper.ZKConfig @@ -329,6 +330,20 @@ module Hbase '!' + ReplicationPeerConfigUtil.convertToString(tableCFs) end +# Transit current cluster to a new state in the specified synchronous +# replication peer +def transit_peer_sync_replication_state(id, state) + if 'ACTIVE'.eql?(state) +@admin.transitReplicationPeerSyncReplicationState(id, SyncReplicationState::ACTIVE) + elsif 'DOWNGRADE_ACTIVE'.eql?(state) +@admin.transitReplicationPeerSyncReplicationState(id, SyncReplicationState::DOWNGRADE_ACTIVE) + elsif 'STANDBY'.eql?(state) +@admin.transitReplicationPeerSyncReplicationState(id, SyncReplicationState::STANDBY) + else +raise(ArgumentError, 'synchronous replication state must be ACTIVE, DOWNGRADE_ACTIVE or STANDBY') + end +end + #-- # Enables a table's replication switch def enable_tablerep(table_name) http://git-wip-us.apache.org/repos/asf/hbase/blob/7a1b7170/hbase-shell/src/main/ruby/shell.rb -- diff --git a/hbase-shell/src/main/ruby/shell.rb b/hbase-shell/src/main/ruby/shell.rb index 4472021..a5b12fd 100644 --- a/hbase-shell/src/main/ruby/shell.rb +++ b/hbase-shell/src/main/ruby/shell.rb @@ -394,6 +394,7 @@ Shell.load_command_group( get_peer_config list_peer_configs update_peer_config +transit_peer_sync_replication_state ] ) http://git-wip-us.apache.org/repos/asf/hbase/blob/7a1b7170/hbase-shell/src/main/ruby/shell/commands/list_peers.rb -- diff --git a/hbase-shell/src/main/ruby/shell/commands/list_peers.rb b/hbase-shell/src/main/ruby/shell/commands/list_peers.rb index caeab86..aa10fda 100644 --- 
a/hbase-shell/src/main/ruby/shell/commands/list_peers.rb +++ b/hbase-shell/src/main/ruby/shell/commands/list_peers.rb @@ -39,8 +39,8 @@ EOF peers = replication_admin.list_peers formatter.header(%w[PEER_ID CLUSTER_KEY ENDPOINT_CLASSNAME -REMOTE_ROOT_DIR STATE REPLICATE_ALL -NAMESPACES TABLE_CFS BANDWIDTH]) +REMOTE_ROOT_DIR SYNC_REPLICATION_STATE STATE +
[1/2] hbase git commit: HBASE-19757 System table gets stuck after enabling region server group feature in secure cluster
Repository: hbase Updated Branches: refs/heads/branch-1 73ed12703 -> 3601bd295 refs/heads/branch-1.4 a75533ba5 -> 636376b28 HBASE-19757 System table gets stuck after enabling region server group feature in secure cluster HBASE-19757 System table gets stuck after enabling region server group feature in secure cluster - addendum removes unused import Amending-Author: Andrew PurtellProject: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/3601bd29 Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/3601bd29 Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/3601bd29 Branch: refs/heads/branch-1 Commit: 3601bd29582cdfe0fae00f11d882665c40ee4773 Parents: 73ed127 Author: Andrew Purtell Authored: Tue Jan 23 18:37:33 2018 -0800 Committer: Andrew Purtell Committed: Tue Jan 23 18:40:43 2018 -0800 -- .../hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java | 15 +++ 1 file changed, 3 insertions(+), 12 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/3601bd29/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java -- diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java index 350d850..e4401c0 100644 --- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java +++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java @@ -27,6 +27,7 @@ import com.google.common.collect.Sets; import com.google.protobuf.ServiceException; import java.io.IOException; +import java.util.Arrays; import java.util.ArrayList; import java.util.Collections; import java.util.Comparator; @@ -51,7 +52,6 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.MetaTableAccessor; -import 
org.apache.hadoop.hbase.NamespaceDescriptor; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableStateManager; @@ -313,17 +313,8 @@ public class RSGroupInfoManagerImpl implements RSGroupInfoManager, ServerListene orphanTables.add(TableName.valueOf(entry)); } -List specialTables; -if(!master.isInitialized()) { - specialTables = new ArrayList(); - specialTables.add(AccessControlLists.ACL_TABLE_NAME); - specialTables.add(TableName.META_TABLE_NAME); - specialTables.add(TableName.NAMESPACE_TABLE_NAME); - specialTables.add(RSGROUP_TABLE_NAME); -} else { - specialTables = - master.listTableNamesByNamespace(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR); -} +final List specialTables = Arrays.asList(AccessControlLists.ACL_TABLE_NAME, +TableName.META_TABLE_NAME, TableName.NAMESPACE_TABLE_NAME, RSGROUP_TABLE_NAME); for(TableName table : specialTables) { orphanTables.add(table);
[2/2] hbase git commit: HBASE-19757 System table gets stuck after enabling region server group feature in secure cluster
HBASE-19757 System table gets stuck after enabling region server group feature in secure cluster HBASE-19757 System table gets stuck after enabling region server group feature in secure cluster - addendum removes unused import Amending-Author: Andrew PurtellProject: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/636376b2 Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/636376b2 Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/636376b2 Branch: refs/heads/branch-1.4 Commit: 636376b28d1fed368d72b8f1c58d1bc437ffd5f8 Parents: a75533b Author: Andrew Purtell Authored: Tue Jan 23 18:37:33 2018 -0800 Committer: Andrew Purtell Committed: Tue Jan 23 18:41:39 2018 -0800 -- .../hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java | 15 +++ 1 file changed, 3 insertions(+), 12 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/636376b2/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java -- diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java index 350d850..e4401c0 100644 --- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java +++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java @@ -27,6 +27,7 @@ import com.google.common.collect.Sets; import com.google.protobuf.ServiceException; import java.io.IOException; +import java.util.Arrays; import java.util.ArrayList; import java.util.Collections; import java.util.Comparator; @@ -51,7 +52,6 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.MetaTableAccessor; -import org.apache.hadoop.hbase.NamespaceDescriptor; import org.apache.hadoop.hbase.ServerName; import 
org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableStateManager; @@ -313,17 +313,8 @@ public class RSGroupInfoManagerImpl implements RSGroupInfoManager, ServerListene orphanTables.add(TableName.valueOf(entry)); } -List specialTables; -if(!master.isInitialized()) { - specialTables = new ArrayList(); - specialTables.add(AccessControlLists.ACL_TABLE_NAME); - specialTables.add(TableName.META_TABLE_NAME); - specialTables.add(TableName.NAMESPACE_TABLE_NAME); - specialTables.add(RSGROUP_TABLE_NAME); -} else { - specialTables = - master.listTableNamesByNamespace(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR); -} +final List specialTables = Arrays.asList(AccessControlLists.ACL_TABLE_NAME, +TableName.META_TABLE_NAME, TableName.NAMESPACE_TABLE_NAME, RSGROUP_TABLE_NAME); for(TableName table : specialTables) { orphanTables.add(table);
[2/2] hbase git commit: HBASE-15321 - Ability to open a HRegion from hdfs snapshot.
HBASE-15321 - Ability to open a HRegion from hdfs snapshot. Project: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a75533ba Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a75533ba Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a75533ba Branch: refs/heads/branch-1.4 Commit: a75533ba550b55bb740be3d33538f163d8a4cf72 Parents: 02547d2 Author: Rahul GidwaniAuthored: Mon Jan 22 12:22:57 2018 -0800 Committer: Andrew Purtell Committed: Tue Jan 23 16:20:00 2018 -0800 -- .../hadoop/hbase/regionserver/HRegion.java | 25 .../hbase/regionserver/HRegionFileSystem.java | 3 +- .../regionserver/TestHdfsSnapshotHRegion.java | 115 +++ 3 files changed, 142 insertions(+), 1 deletion(-) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/a75533ba/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java -- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java index c006a44..e95a392 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java @@ -6998,6 +6998,31 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi return this; } + /** + * Open a Region on a read-only file-system (like hdfs snapshots) + * @param conf The Configuration object to use. + * @param fs Filesystem to use + * @param info Info for region to be opened. 
+ * @param htd the table descriptor + * @return new HRegion + * @throws IOException e + */ + public static HRegion openReadOnlyFileSystemHRegion(final Configuration conf, final FileSystem fs, + final Path tableDir, HRegionInfo info, final HTableDescriptor htd) throws IOException { +if (info == null) { + throw new NullPointerException("Passed region info is null"); +} +if (LOG.isDebugEnabled()) { + LOG.debug("Opening region (readOnly filesystem): " + info); +} +if (info.getReplicaId() <= 0) { + info = new HRegionInfo((HRegionInfo) info, 1); +} +HRegion r = HRegion.newHRegion(tableDir, null, fs, conf, info, htd, null); +r.writestate.setReadOnly(true); +return r.openHRegion(null); + } + public static void warmupHRegion(final HRegionInfo info, final HTableDescriptor htd, final WAL wal, final Configuration conf, final RegionServerServices rsServices, http://git-wip-us.apache.org/repos/asf/hbase/blob/a75533ba/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java -- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java index 3a0b30a..884485c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java @@ -28,6 +28,7 @@ import java.util.List; import java.util.Map; import java.util.UUID; +import com.google.common.annotations.VisibleForTesting; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.classification.InterfaceAudience; @@ -75,7 +76,7 @@ public class HRegionFileSystem { public static final String REGION_SPLITS_DIR = ".splits"; /** Temporary subdirectory of the region directory used for compaction output. 
*/ - private static final String REGION_TEMP_DIR = ".tmp"; + @VisibleForTesting static final String REGION_TEMP_DIR = ".tmp"; private final HRegionInfo regionInfo; //regionInfo for interacting with FS (getting encodedName, etc) http://git-wip-us.apache.org/repos/asf/hbase/blob/a75533ba/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHdfsSnapshotHRegion.java -- diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHdfsSnapshotHRegion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHdfsSnapshotHRegion.java new file mode 100644 index 000..64c3735 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHdfsSnapshotHRegion.java @@ -0,0 +1,115 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information
[1/2] hbase git commit: HBASE-19163 "Maximum lock count exceeded" from region server's batch processing
Repository: hbase Updated Branches: refs/heads/branch-1.4 4531555f6 -> a75533ba5 HBASE-19163 "Maximum lock count exceeded" from region server's batch processing Signed-off-by: Umesh AgasheSigned-off-by: Michael Stack Project: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/02547d2f Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/02547d2f Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/02547d2f Branch: refs/heads/branch-1.4 Commit: 02547d2f3b258fb7e3a26a0e29817845af364858 Parents: 4531555 Author: huaxiangsun Authored: Fri Jan 19 11:22:00 2018 -0800 Committer: Andrew Purtell Committed: Tue Jan 23 16:19:52 2018 -0800 -- .../hadoop/hbase/regionserver/HRegion.java | 39 .../hbase/client/TestFromClientSide3.java | 27 ++ .../hbase/regionserver/TestAtomicOperation.java | 6 +-- 3 files changed, 62 insertions(+), 10 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/02547d2f/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java -- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java index faab525..c006a44 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java @@ -3176,6 +3176,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi // We try to set up a batch in the range [firstIndex,lastIndexExclusive) int firstIndex = batchOp.nextIndexToProcess; int lastIndexExclusive = firstIndex; +RowLock prevRowLock = null; boolean success = false; int noOfPuts = 0, noOfDeletes = 0; WALKey walKey = null; @@ -3257,7 +3258,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi boolean shouldBlock = numReadyToWrite == 0; RowLock rowLock = null; try { - rowLock = 
getRowLockInternal(mutation.getRow(), true, shouldBlock); + rowLock = getRowLockInternal(mutation.getRow(), true, shouldBlock, prevRowLock); } catch (TimeoutIOException e) { // We will retry when other exceptions, but we should stop if we timeout . throw e; @@ -3271,7 +3272,12 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi break; } else { - acquiredRowLocks.add(rowLock); + if (rowLock != prevRowLock) { +// It is a different row now, add this to the acquiredRowLocks and +// set prevRowLock to the new returned rowLock +acquiredRowLocks.add(rowLock); +prevRowLock = rowLock; + } } lastIndexExclusive++; @@ -3368,7 +3374,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi checkAndPrepareMutation(cpMutation, isInReplay, cpFamilyMap, now); // Acquire row locks. If not, the whole batch will fail. - acquiredRowLocks.add(getRowLockInternal(cpMutation.getRow(), true, true)); + acquiredRowLocks.add(getRowLockInternal(cpMutation.getRow(), true, true, null)); // Returned mutations from coprocessor correspond to the Mutation at index i. We can // directly add the cells from those mutations to the familyMaps of this mutation. @@ -5464,17 +5470,17 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi public RowLock getRowLock(byte[] row, boolean readLock, boolean waitForLock) throws IOException { // Make sure the row is inside of this region before getting the lock for it. checkRow(row, "row lock"); -return getRowLockInternal(row, readLock, waitForLock); +return getRowLockInternal(row, readLock, waitForLock, null); } // getRowLock calls checkRow. Call this to skip checkRow. 
protected RowLock getRowLockInternal(byte[] row) throws IOException { -return getRowLockInternal(row, false, true); +return getRowLockInternal(row, false, true, null); } - protected RowLock getRowLockInternal(byte[] row, boolean readLock, boolean waitForLock) - throws IOException { + protected RowLock getRowLockInternal(byte[] row, boolean readLock, boolean waitForLock, + final RowLock prevRowLock) throws IOException { // create an object to use a a key in the row lock map HashedBytes rowKey = new HashedBytes(row); @@ -5508,6 +5514,14 @@ public class HRegion implements
hbase git commit: HBASE-19780 Fix build in branch-2 - change execution phase of checkstyle plugin back to default 'verify'
Repository: hbase Updated Branches: refs/heads/branch-2 8977aae43 -> b0b2d12d9 HBASE-19780 Fix build in branch-2 - change execution phase of checkstyle plugin back to default 'verify' Project: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/b0b2d12d Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/b0b2d12d Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/b0b2d12d Branch: refs/heads/branch-2 Commit: b0b2d12d9d434795a19167303dd3310ecd4b32a7 Parents: 8977aae Author: Apekshit SharmaAuthored: Fri Jan 12 15:06:08 2018 -0800 Committer: Apekshit Sharma Committed: Tue Jan 23 15:28:37 2018 -0800 -- hbase-annotations/pom.xml | 15 ++- hbase-archetypes/pom.xml | 15 ++- hbase-build-support/hbase-error-prone/pom.xml | 15 ++- hbase-external-blockcache/pom.xml | 15 ++- hbase-metrics-api/pom.xml | 15 ++- hbase-metrics/pom.xml | 15 ++- hbase-protocol-shaded/pom.xml | 51 +- pom.xml | 26 ++- 8 files changed, 43 insertions(+), 124 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/b0b2d12d/hbase-annotations/pom.xml -- diff --git a/hbase-annotations/pom.xml b/hbase-annotations/pom.xml index 6ad3c12..8e73769 100644 --- a/hbase-annotations/pom.xml +++ b/hbase-annotations/pom.xml @@ -36,18 +36,9 @@ org.apache.maven.plugins maven-checkstyle-plugin - - -checkstyle -validate - - check - - - true - - - + + true + http://git-wip-us.apache.org/repos/asf/hbase/blob/b0b2d12d/hbase-archetypes/pom.xml -- diff --git a/hbase-archetypes/pom.xml b/hbase-archetypes/pom.xml index bc96c27..8f755f0 100644 --- a/hbase-archetypes/pom.xml +++ b/hbase-archetypes/pom.xml @@ -81,18 +81,9 @@ org.apache.maven.plugins maven-checkstyle-plugin - - -checkstyle -validate - - check - - - true - - - + + true + http://git-wip-us.apache.org/repos/asf/hbase/blob/b0b2d12d/hbase-build-support/hbase-error-prone/pom.xml -- diff --git a/hbase-build-support/hbase-error-prone/pom.xml b/hbase-build-support/hbase-error-prone/pom.xml index 
00dad96..b9e5e1e 100644 --- a/hbase-build-support/hbase-error-prone/pom.xml +++ b/hbase-build-support/hbase-error-prone/pom.xml @@ -71,18 +71,9 @@ org.apache.maven.plugins maven-checkstyle-plugin - - -checkstyle -validate - - check - - - true - - - + + true + http://git-wip-us.apache.org/repos/asf/hbase/blob/b0b2d12d/hbase-external-blockcache/pom.xml -- diff --git a/hbase-external-blockcache/pom.xml b/hbase-external-blockcache/pom.xml index e9d4e60..4de7f34 100644 --- a/hbase-external-blockcache/pom.xml +++ b/hbase-external-blockcache/pom.xml @@ -98,18 +98,9 @@ org.apache.maven.plugins maven-checkstyle-plugin - - -checkstyle -validate - - check - - - true - - - + + true + http://git-wip-us.apache.org/repos/asf/hbase/blob/b0b2d12d/hbase-metrics-api/pom.xml -- diff --git a/hbase-metrics-api/pom.xml b/hbase-metrics-api/pom.xml index 0327204..3389f7e 100644 --- a/hbase-metrics-api/pom.xml +++ b/hbase-metrics-api/pom.xml @@ -71,18 +71,9 @@ org.apache.maven.plugins maven-checkstyle-plugin - - -checkstyle -validate - - check - - - true - - - + + true + http://git-wip-us.apache.org/repos/asf/hbase/blob/b0b2d12d/hbase-metrics/pom.xml -- diff --git
hbase git commit: HBASE-19780 Remove execution phase and goal of checkstyle plugin.
Repository: hbase Updated Branches: refs/heads/master d589b7238 -> 0b6422658 HBASE-19780 Remove execution phase and goal of checkstyle plugin. We don't do checkstyle as part of default "mvn install" since it takes significant amount of time (~90sec) and slows builds and development. We only need it for pre-commits. Project: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/0b642265 Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/0b642265 Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/0b642265 Branch: refs/heads/master Commit: 0b6422658c6adff8e38d785b3cec4d2cf3b3bc93 Parents: d589b72 Author: Apekshit SharmaAuthored: Fri Jan 12 15:06:08 2018 -0800 Committer: Apekshit Sharma Committed: Tue Jan 23 15:08:04 2018 -0800 -- hbase-annotations/pom.xml | 15 ++- hbase-archetypes/pom.xml | 15 ++- hbase-build-support/hbase-error-prone/pom.xml | 15 ++- hbase-external-blockcache/pom.xml | 15 ++- hbase-hadoop-compat/pom.xml | 15 ++- hbase-metrics-api/pom.xml | 15 ++- hbase-metrics/pom.xml | 15 ++- hbase-protocol-shaded/pom.xml | 51 +- hbase-protocol/pom.xml| 15 ++- hbase-replication/pom.xml | 15 ++- hbase-rsgroup/pom.xml | 15 ++- hbase-spark-it/pom.xml| 15 ++- hbase-spark/pom.xml | 15 ++- hbase-thrift/pom.xml | 15 ++- pom.xml | 26 ++- 15 files changed, 64 insertions(+), 208 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/0b642265/hbase-annotations/pom.xml -- diff --git a/hbase-annotations/pom.xml b/hbase-annotations/pom.xml index 86eb313..94c506b 100644 --- a/hbase-annotations/pom.xml +++ b/hbase-annotations/pom.xml @@ -36,18 +36,9 @@ org.apache.maven.plugins maven-checkstyle-plugin - - -checkstyle -validate - - check - - - true - - - + + true + http://git-wip-us.apache.org/repos/asf/hbase/blob/0b642265/hbase-archetypes/pom.xml -- diff --git a/hbase-archetypes/pom.xml b/hbase-archetypes/pom.xml index 58ac89a..f33a216 100644 --- a/hbase-archetypes/pom.xml +++ b/hbase-archetypes/pom.xml @@ 
-81,18 +81,9 @@ org.apache.maven.plugins maven-checkstyle-plugin - - -checkstyle -validate - - check - - - true - - - + + true + http://git-wip-us.apache.org/repos/asf/hbase/blob/0b642265/hbase-build-support/hbase-error-prone/pom.xml -- diff --git a/hbase-build-support/hbase-error-prone/pom.xml b/hbase-build-support/hbase-error-prone/pom.xml index b90d733..343bb3e 100644 --- a/hbase-build-support/hbase-error-prone/pom.xml +++ b/hbase-build-support/hbase-error-prone/pom.xml @@ -71,18 +71,9 @@ org.apache.maven.plugins maven-checkstyle-plugin - - -checkstyle -validate - - check - - - true - - - + + true + http://git-wip-us.apache.org/repos/asf/hbase/blob/0b642265/hbase-external-blockcache/pom.xml -- diff --git a/hbase-external-blockcache/pom.xml b/hbase-external-blockcache/pom.xml index 8b048c5..ce79e1a 100644 --- a/hbase-external-blockcache/pom.xml +++ b/hbase-external-blockcache/pom.xml @@ -98,18 +98,9 @@ org.apache.maven.plugins maven-checkstyle-plugin - - -checkstyle -validate - - check - - - true - - - + + true + http://git-wip-us.apache.org/repos/asf/hbase/blob/0b642265/hbase-hadoop-compat/pom.xml -- diff --git a/hbase-hadoop-compat/pom.xml b/hbase-hadoop-compat/pom.xml index 512a466..da06e77 100644 ---
[1/2] hbase git commit: HBASE-18891 Upgrade to netty-all 4.0.50.Final
Repository: hbase Updated Branches: refs/heads/branch-1.2 46e199d9a -> 4a6faab40 refs/heads/branch-1.3 bce3d89b0 -> 95befea83 HBASE-18891 Upgrade to netty-all 4.0.50.Final Not moved to 4.0.52 because of Java8 compatibility reasons Signed-off-by: Ted YuProject: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/95befea8 Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/95befea8 Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/95befea8 Branch: refs/heads/branch-1.3 Commit: 95befea83d2ebf8451498b13119c98f2182acdc4 Parents: bce3d89 Author: Josh Elser Authored: Wed Sep 27 11:58:44 2017 -0400 Committer: Josh Elser Committed: Tue Jan 23 10:57:40 2018 -0500 -- pom.xml | 5 - 1 file changed, 4 insertions(+), 1 deletion(-) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/95befea8/pom.xml -- diff --git a/pom.xml b/pom.xml index 6cb6923..92fe9bf 100644 --- a/pom.xml +++ b/pom.xml @@ -1278,7 +1278,10 @@ 4.0.3 2.4.1 1.3.3 -4.0.23.Final + +4.0.50.Final 3.6.2.Final 2.1.2 1.0.8
[2/2] hbase git commit: HBASE-18891 Upgrade to netty-all 4.0.50.Final
HBASE-18891 Upgrade to netty-all 4.0.50.Final Not moved to 4.0.52 because of Java8 compatibility reasons Signed-off-by: Ted YuProject: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/4a6faab4 Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/4a6faab4 Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/4a6faab4 Branch: refs/heads/branch-1.2 Commit: 4a6faab401e2124dc55cf07c5f7a73ad42ad93af Parents: 46e199d Author: Josh Elser Authored: Wed Sep 27 11:58:44 2017 -0400 Committer: Josh Elser Committed: Tue Jan 23 11:19:42 2018 -0500 -- pom.xml | 5 - 1 file changed, 4 insertions(+), 1 deletion(-) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/4a6faab4/pom.xml -- diff --git a/pom.xml b/pom.xml index 408f11a..00241fd 100644 --- a/pom.xml +++ b/pom.xml @@ -1288,7 +1288,10 @@ 4.0.3 2.4.1 1.3.3 -4.0.23.Final + +4.0.50.Final 3.6.2.Final 2.1.2 1.0.8
hbase git commit: HBASE-19847 Fix findbugs and error-prone warnings in hbase-thrift (branch-2)
Repository: hbase Updated Branches: refs/heads/master 970636c5a -> d589b7238 HBASE-19847 Fix findbugs and error-prone warnings in hbase-thrift (branch-2) Signed-off-by: tedyuProject: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/d589b723 Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/d589b723 Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/d589b723 Branch: refs/heads/master Commit: d589b7238257c78dc8616e47ab5bb3e2309a5237 Parents: 970636c Author: Peter Somogyi Authored: Tue Jan 23 15:54:26 2018 +0100 Committer: tedyu Committed: Tue Jan 23 08:08:34 2018 -0800 -- .../hadoop/hbase/thrift/IncrementCoalescer.java | 24 +++ .../hbase/thrift/TBoundedThreadPoolServer.java | 2 + .../hadoop/hbase/thrift/ThriftServerRunner.java | 2 +- .../thrift2/ThriftHBaseServiceHandler.java | 2 +- .../hadoop/hbase/thrift/TestCallQueue.java | 3 +- .../hadoop/hbase/thrift/TestThriftServer.java | 34 ++-- .../thrift2/TestThriftHBaseServiceHandler.java | 162 +-- ...TestThriftHBaseServiceHandlerWithLabels.java | 35 ++-- ...stThriftHBaseServiceHandlerWithReadOnly.java | 36 ++--- 9 files changed, 161 insertions(+), 139 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/d589b723/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/IncrementCoalescer.java -- diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/IncrementCoalescer.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/IncrementCoalescer.java index 0dacf8b..3dad28a 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/IncrementCoalescer.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/IncrementCoalescer.java @@ -158,6 +158,7 @@ public class IncrementCoalescer implements IncrementCoalescerMBean { namePrefix = "ICV-" + poolNumber.getAndIncrement() + "-thread-"; } +@Override public Thread newThread(Runnable r) { Thread t = new Thread(group, r, namePrefix + 
threadNumber.getAndIncrement(), 0); @@ -348,49 +349,72 @@ public class IncrementCoalescer implements IncrementCoalescerMBean { } // MBean get/set methods + @Override public int getQueueSize() { return pool.getQueue().size(); } + + @Override public int getMaxQueueSize() { return this.maxQueueSize; } + + @Override public void setMaxQueueSize(int newSize) { this.maxQueueSize = newSize; } + @Override public long getPoolCompletedTaskCount() { return pool.getCompletedTaskCount(); } + + @Override public long getPoolTaskCount() { return pool.getTaskCount(); } + + @Override public int getPoolLargestPoolSize() { return pool.getLargestPoolSize(); } + + @Override public int getCorePoolSize() { return pool.getCorePoolSize(); } + + @Override public void setCorePoolSize(int newCoreSize) { pool.setCorePoolSize(newCoreSize); } + + @Override public int getMaxPoolSize() { return pool.getMaximumPoolSize(); } + + @Override public void setMaxPoolSize(int newMaxSize) { pool.setMaximumPoolSize(newMaxSize); } + + @Override public long getFailedIncrements() { return failedIncrements.sum(); } + @Override public long getSuccessfulCoalescings() { return successfulCoalescings.sum(); } + @Override public long getTotalIncrements() { return totalIncrements.sum(); } + @Override public long getCountersMapSize() { return countersMap.size(); } http://git-wip-us.apache.org/repos/asf/hbase/blob/d589b723/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/TBoundedThreadPoolServer.java -- diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/TBoundedThreadPoolServer.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/TBoundedThreadPoolServer.java index 4926c8b..f9b2532 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/TBoundedThreadPoolServer.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/TBoundedThreadPoolServer.java @@ -161,6 +161,7 @@ public class TBoundedThreadPoolServer extends TServer { serverOptions = options; } + 
@Override public void serve() { try { serverTransport_.listen(); @@ -274,6 +275,7 @@ public class TBoundedThreadPoolServer extends TServer { /** * Loops on processing a client forever */ +@Override public void run() { TProcessor processor = null; TTransport inputTransport = null;
[12/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8118541f/devapidocs/org/apache/hadoop/hbase/regionserver/HMobStore.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/HMobStore.html b/devapidocs/org/apache/hadoop/hbase/regionserver/HMobStore.html index a36c52a..196d515 100644 --- a/devapidocs/org/apache/hadoop/hbase/regionserver/HMobStore.html +++ b/devapidocs/org/apache/hadoop/hbase/regionserver/HMobStore.html @@ -119,7 +119,7 @@ var activeTableTab = "activeTableTab"; @InterfaceAudience.Private -public class HMobStore +public class HMobStore extends HStore The store implementation to save MOBs (medium objects), it extends the HStore. When a descriptor of a column family has the value "IS_MOB", it means this column family @@ -154,19 +154,19 @@ extends Field and Description -private long +private http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicLong.html?is-external=true; title="class or interface in java.util.concurrent.atomic">AtomicLong cellsCountCompactedFromMob -private long +private http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicLong.html?is-external=true; title="class or interface in java.util.concurrent.atomic">AtomicLong cellsCountCompactedToMob -private long +private http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicLong.html?is-external=true; title="class or interface in java.util.concurrent.atomic">AtomicLong cellsSizeCompactedFromMob -private long +private http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicLong.html?is-external=true; title="class or interface in java.util.concurrent.atomic">AtomicLong cellsSizeCompactedToMob @@ -198,23 +198,23 @@ extends mobFamilyPath -private long +private http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicLong.html?is-external=true; title="class or interface in java.util.concurrent.atomic">AtomicLong mobFlushCount -private long +private 
http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicLong.html?is-external=true; title="class or interface in java.util.concurrent.atomic">AtomicLong mobFlushedCellsCount -private long +private http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicLong.html?is-external=true; title="class or interface in java.util.concurrent.atomic">AtomicLong mobFlushedCellsSize -private long +private http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicLong.html?is-external=true; title="class or interface in java.util.concurrent.atomic">AtomicLong mobScanCellsCount -private long +private http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicLong.html?is-external=true; title="class or interface in java.util.concurrent.atomic">AtomicLong mobScanCellsSize @@ -501,7 +501,7 @@ extends LOG -private static finalorg.slf4j.Logger LOG +private static finalorg.slf4j.Logger LOG @@ -510,7 +510,7 @@ extends mobCacheConfig -privateMobCacheConfig mobCacheConfig +privateMobCacheConfig mobCacheConfig @@ -519,7 +519,7 @@ extends homePath -privateorg.apache.hadoop.fs.Path homePath +privateorg.apache.hadoop.fs.Path homePath @@ -528,7 +528,7 @@ extends mobFamilyPath -privateorg.apache.hadoop.fs.Path mobFamilyPath +privateorg.apache.hadoop.fs.Path mobFamilyPath @@ -537,7 +537,7 @@ extends cellsCountCompactedToMob -private volatilelong cellsCountCompactedToMob +privatehttp://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicLong.html?is-external=true; title="class or interface in java.util.concurrent.atomic">AtomicLong cellsCountCompactedToMob @@ -546,7 +546,7 @@ extends cellsCountCompactedFromMob -private volatilelong cellsCountCompactedFromMob +privatehttp://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicLong.html?is-external=true; title="class or interface in java.util.concurrent.atomic">AtomicLong cellsCountCompactedFromMob @@ -555,7 +555,7 @@ extends cellsSizeCompactedToMob 
-private volatilelong cellsSizeCompactedToMob +privatehttp://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicLong.html?is-external=true; title="class or interface in java.util.concurrent.atomic">AtomicLong cellsSizeCompactedToMob @@ -564,7 +564,7 @@ extends cellsSizeCompactedFromMob -private volatilelong cellsSizeCompactedFromMob +privatehttp://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicLong.html?is-external=true; title="class or interface in java.util.concurrent.atomic">AtomicLong cellsSizeCompactedFromMob @@ -573,7 +573,7 @@ extends mobFlushCount -private volatilelong mobFlushCount +privatehttp://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicLong.html?is-external=true; title="class or interface in
[41/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8118541f/checkstyle.rss -- diff --git a/checkstyle.rss b/checkstyle.rss index 7814192..4b9c840 100644 --- a/checkstyle.rss +++ b/checkstyle.rss @@ -25,8 +25,8 @@ under the License. en-us 2007 - 2018 The Apache Software Foundation - File: 3500, - Errors: 18095, + File: 3503, + Errors: 17563, Warnings: 0, Infos: 0 @@ -531,7 +531,7 @@ under the License. 0 - 11 + 2 @@ -797,7 +797,7 @@ under the License. 0 - 7 + 6 @@ -895,7 +895,7 @@ under the License. 0 - 8 + 0 @@ -2687,7 +2687,7 @@ under the License. 0 - 7 + 6 @@ -3275,7 +3275,7 @@ under the License. 0 - 10 + 3 @@ -4591,7 +4591,7 @@ under the License. 0 - 10 + 9 @@ -6383,7 +6383,7 @@ under the License. 0 - 8 + 0 @@ -6635,7 +6635,7 @@ under the License. 0 - 25 + 23 @@ -6691,7 +6691,7 @@ under the License. 0 - 3 + 0 @@ -6789,7 +6789,7 @@ under the License. 0 - 201 + 14 @@ -8581,7 +8581,7 @@ under the License. 0 - 52 + 17 @@ -8609,7 +8609,7 @@ under the License. 0 - 7 + 9 @@ -8805,7 +8805,7 @@ under the License. 0 - 1 + 0 @@ -9561,7 +9561,7 @@ under the License. 0 - 7 + 1 @@ -11227,7 +11227,7 @@ under the License. 0 - 31 + 30 @@ -11325,7 +11325,7 @@ under the License. 0 - 32 + 3 @@ -12711,7 +12711,7 @@ under the License. 0 - 1 + 0 @@ -13033,7 +13033,7 @@ under the License. 0 - 1 + 0 @@ -14069,7 +14069,7 @@ under the License. 0 - 6 + 5 @@ -15357,7 +15357,7 @@ under the License. 0 - 2 + 1 @@ -15637,7 +15637,7 @@ under the License. 0 - 2 + 1 @@ -15875,7 +15875,7 @@ under the License. 0 - 13 + 11 @@
[47/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8118541f/apidocs/org/apache/hadoop/hbase/snapshot/SnapshotInfo.html -- diff --git a/apidocs/org/apache/hadoop/hbase/snapshot/SnapshotInfo.html b/apidocs/org/apache/hadoop/hbase/snapshot/SnapshotInfo.html index 074e0a4..8814b9e 100644 --- a/apidocs/org/apache/hadoop/hbase/snapshot/SnapshotInfo.html +++ b/apidocs/org/apache/hadoop/hbase/snapshot/SnapshotInfo.html @@ -287,7 +287,7 @@ extends org.apache.hadoop.hbase.util.AbstractHBaseTool doWork -publicintdoWork() +publicintdoWork() throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true; title="class or interface in java.io">IOException, http://docs.oracle.com/javase/8/docs/api/java/lang/InterruptedException.html?is-external=true; title="class or interface in java.lang">InterruptedException Description copied from class:org.apache.hadoop.hbase.util.AbstractHBaseTool @@ -307,7 +307,7 @@ extends org.apache.hadoop.hbase.util.AbstractHBaseTool addOptions -protectedvoidaddOptions() +protectedvoidaddOptions() Description copied from class:org.apache.hadoop.hbase.util.AbstractHBaseTool Override this to add command-line options using AbstractHBaseTool.addOptWithArg(java.lang.String, java.lang.String) and similar methods. @@ -323,7 +323,7 @@ extends org.apache.hadoop.hbase.util.AbstractHBaseTool processOptions -protectedvoidprocessOptions(org.apache.commons.cli.CommandLinecmd) +protectedvoidprocessOptions(org.apache.commons.cli.CommandLinecmd) Description copied from class:org.apache.hadoop.hbase.util.AbstractHBaseTool This method is called to process the options after they have been parsed. 
@@ -338,7 +338,7 @@ extends org.apache.hadoop.hbase.util.AbstractHBaseTool printUsage -protectedvoidprintUsage() +protectedvoidprintUsage() Overrides: printUsagein classorg.apache.hadoop.hbase.util.AbstractHBaseTool @@ -351,7 +351,7 @@ extends org.apache.hadoop.hbase.util.AbstractHBaseTool getSnapshotStats -public staticorg.apache.hadoop.hbase.snapshot.SnapshotInfo.SnapshotStatsgetSnapshotStats(org.apache.hadoop.conf.Configurationconf, +public staticorg.apache.hadoop.hbase.snapshot.SnapshotInfo.SnapshotStatsgetSnapshotStats(org.apache.hadoop.conf.Configurationconf, SnapshotDescriptionsnapshot) throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true; title="class or interface in java.io">IOException Returns the snapshot stats @@ -372,7 +372,7 @@ extends org.apache.hadoop.hbase.util.AbstractHBaseTool getSnapshotStats -public staticorg.apache.hadoop.hbase.snapshot.SnapshotInfo.SnapshotStatsgetSnapshotStats(org.apache.hadoop.conf.Configurationconf, +public staticorg.apache.hadoop.hbase.snapshot.SnapshotInfo.SnapshotStatsgetSnapshotStats(org.apache.hadoop.conf.Configurationconf, org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescriptionsnapshotDesc, http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true; title="class or interface in java.util">Maporg.apache.hadoop.fs.Path,http://docs.oracle.com/javase/8/docs/api/java/lang/Integer.html?is-external=true; title="class or interface in java.lang">IntegerfilesMap) throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true; title="class or interface in java.io">IOException @@ -395,7 +395,7 @@ extends org.apache.hadoop.hbase.util.AbstractHBaseTool getSnapshotList -public statichttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true; title="class or interface in java.util">ListSnapshotDescriptiongetSnapshotList(org.apache.hadoop.conf.Configurationconf) +public 
statichttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true; title="class or interface in java.util">ListSnapshotDescriptiongetSnapshotList(org.apache.hadoop.conf.Configurationconf) throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true; title="class or interface in java.io">IOException Returns the list of available snapshots in the specified location @@ -414,7 +414,7 @@ extends org.apache.hadoop.hbase.util.AbstractHBaseTool getSnapshotsFilesMap -public statichttp://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true; title="class or interface in
[04/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8118541f/devapidocs/org/apache/hadoop/hbase/regionserver/HStore.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/HStore.html b/devapidocs/org/apache/hadoop/hbase/regionserver/HStore.html index 82c1efb..6796a10 100644 --- a/devapidocs/org/apache/hadoop/hbase/regionserver/HStore.html +++ b/devapidocs/org/apache/hadoop/hbase/regionserver/HStore.html @@ -118,7 +118,7 @@ var activeTableTab = "activeTableTab"; @InterfaceAudience.Private -public class HStore +public class HStore extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true; title="class or interface in java.lang">Object implements Store, HeapSize, StoreConfigInformation, PropagatingConfigurationObserver A Store holds a column family in a Region. Its a memstore and a set of zero @@ -218,11 +218,11 @@ implements COMPACTCHECKER_INTERVAL_MULTIPLIER_KEY -private long +private http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicLong.html?is-external=true; title="class or interface in java.util.concurrent.atomic">AtomicLong compactedCellsCount -private long +private http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicLong.html?is-external=true; title="class or interface in java.util.concurrent.atomic">AtomicLong compactedCellsSize @@ -278,15 +278,15 @@ implements FIXED_OVERHEAD -private long +private http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicLong.html?is-external=true; title="class or interface in java.util.concurrent.atomic">AtomicLong flushedCellsCount -private long +private http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicLong.html?is-external=true; title="class or interface in java.util.concurrent.atomic">AtomicLong flushedCellsSize -private long +private http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicLong.html?is-external=true; title="class or interface in 
java.util.concurrent.atomic">AtomicLong flushedOutputFileSize @@ -316,11 +316,11 @@ implements LOG -private long +private http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicLong.html?is-external=true; title="class or interface in java.util.concurrent.atomic">AtomicLong majorCompactedCellsCount -private long +private http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicLong.html?is-external=true; title="class or interface in java.util.concurrent.atomic">AtomicLong majorCompactedCellsSize @@ -356,11 +356,11 @@ implements storeEngine -private long +private http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicLong.html?is-external=true; title="class or interface in java.util.concurrent.atomic">AtomicLong storeSize -private long +private http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicLong.html?is-external=true; title="class or interface in java.util.concurrent.atomic">AtomicLong totalUncompressedBytes @@ -1226,7 +1226,7 @@ implements MEMSTORE_CLASS_NAME -public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">String MEMSTORE_CLASS_NAME +public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">String MEMSTORE_CLASS_NAME See Also: Constant Field Values @@ -1239,7 +1239,7 @@ implements COMPACTCHECKER_INTERVAL_MULTIPLIER_KEY -public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">String COMPACTCHECKER_INTERVAL_MULTIPLIER_KEY +public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">String COMPACTCHECKER_INTERVAL_MULTIPLIER_KEY See Also: Constant Field Values @@ -1252,7 +1252,7 @@ implements BLOCKING_STOREFILES_KEY -public static 
finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">String BLOCKING_STOREFILES_KEY +public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">String BLOCKING_STOREFILES_KEY See Also: Constant Field Values @@ -1265,7 +1265,7 @@ implements BLOCK_STORAGE_POLICY_KEY -public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">String BLOCK_STORAGE_POLICY_KEY +public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">String BLOCK_STORAGE_POLICY_KEY See Also: Constant Field Values @@ -1278,7 +1278,7 @@ implements
[44/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8118541f/apidocs/src-html/org/apache/hadoop/hbase/client/locking/EntityLock.html -- diff --git a/apidocs/src-html/org/apache/hadoop/hbase/client/locking/EntityLock.html b/apidocs/src-html/org/apache/hadoop/hbase/client/locking/EntityLock.html index 64f8a30..012b199 100644 --- a/apidocs/src-html/org/apache/hadoop/hbase/client/locking/EntityLock.html +++ b/apidocs/src-html/org/apache/hadoop/hbase/client/locking/EntityLock.html @@ -235,56 +235,57 @@ 227 return this; 228} 229 -230public void run() { -231 final LockHeartbeatRequest lockHeartbeatRequest = -232 LockHeartbeatRequest.newBuilder().setProcId(procId).build(); -233 -234 LockHeartbeatResponse response; -235 while (true) { -236try { -237 response = stub.lockHeartbeat(null, lockHeartbeatRequest); -238} catch (Exception e) { -239 e = ProtobufUtil.handleRemoteException(e); -240 locked.set(false); -241 LOG.error("Heartbeat failed, releasing " + EntityLock.this, e); -242 abort.abort("Heartbeat failed", e); -243 return; -244} -245if (!isLocked() response.getLockStatus() == LockHeartbeatResponse.LockStatus.LOCKED) { -246 locked.set(true); -247 latch.countDown(); -248} else if (isLocked() response.getLockStatus() == LockHeartbeatResponse.LockStatus.UNLOCKED) { -249 // Lock timed out. -250 locked.set(false); -251 abort.abort("Lock timed out.", null); -252 return; -253} -254 -255try { -256 // If lock not acquired yet, poll faster so we can notify faster. -257 long sleepTime = 1000; -258 if (isLocked()) { -259// If lock acquired, then use lock timeout to determine heartbeat rate. -260// If timeout is heartbeatTimeBuffer, send back to back heartbeats. -261sleepTime = Math.max(response.getTimeoutMs() - heartbeatTimeBuffer, 1); -262 } -263 if (testingSleepTime != 0) { -264sleepTime = testingSleepTime; -265 } -266 Thread.sleep(sleepTime); -267} catch (InterruptedException e) { -268 // Since there won't be any more heartbeats, assume lock will be lost. 
-269 locked.set(false); -270 if (!this.shutdown) { -271LOG.error("Interrupted, releasing " + this, e); -272abort.abort("Worker thread interrupted", e); -273 } -274 return; -275} -276 } -277} -278 } -279} +230@Override +231public void run() { +232 final LockHeartbeatRequest lockHeartbeatRequest = +233 LockHeartbeatRequest.newBuilder().setProcId(procId).build(); +234 +235 LockHeartbeatResponse response; +236 while (true) { +237try { +238 response = stub.lockHeartbeat(null, lockHeartbeatRequest); +239} catch (Exception e) { +240 e = ProtobufUtil.handleRemoteException(e); +241 locked.set(false); +242 LOG.error("Heartbeat failed, releasing " + EntityLock.this, e); +243 abort.abort("Heartbeat failed", e); +244 return; +245} +246if (!isLocked() response.getLockStatus() == LockHeartbeatResponse.LockStatus.LOCKED) { +247 locked.set(true); +248 latch.countDown(); +249} else if (isLocked() response.getLockStatus() == LockHeartbeatResponse.LockStatus.UNLOCKED) { +250 // Lock timed out. +251 locked.set(false); +252 abort.abort("Lock timed out.", null); +253 return; +254} +255 +256try { +257 // If lock not acquired yet, poll faster so we can notify faster. +258 long sleepTime = 1000; +259 if (isLocked()) { +260// If lock acquired, then use lock timeout to determine heartbeat rate. +261// If timeout is heartbeatTimeBuffer, send back to back heartbeats. +262sleepTime = Math.max(response.getTimeoutMs() - heartbeatTimeBuffer, 1); +263 } +264 if (testingSleepTime != 0) { +265sleepTime = testingSleepTime; +266 } +267 Thread.sleep(sleepTime); +268} catch (InterruptedException e) { +269 // Since there won't be any more heartbeats, assume lock will be lost. +270 locked.set(false); +271 if (!this.shutdown) { +272LOG.error("Interrupted, releasing " + this, e); +273abort.abort("Worker thread interrupted", e); +274 } +275 return; +276} +277 } +278} +279 } +280}
[19/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8118541f/devapidocs/org/apache/hadoop/hbase/master/RegionServerTracker.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/master/RegionServerTracker.html b/devapidocs/org/apache/hadoop/hbase/master/RegionServerTracker.html index 9c3b029..4da4882 100644 --- a/devapidocs/org/apache/hadoop/hbase/master/RegionServerTracker.html +++ b/devapidocs/org/apache/hadoop/hbase/master/RegionServerTracker.html @@ -280,7 +280,7 @@ extends regionServers -privatehttp://docs.oracle.com/javase/8/docs/api/java/util/NavigableMap.html?is-external=true; title="class or interface in java.util">NavigableMapServerName,org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionServerInfo regionServers +private finalhttp://docs.oracle.com/javase/8/docs/api/java/util/NavigableMap.html?is-external=true; title="class or interface in java.util">NavigableMapServerName,org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionServerInfo regionServers http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8118541f/devapidocs/org/apache/hadoop/hbase/master/ServerManager.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/master/ServerManager.html b/devapidocs/org/apache/hadoop/hbase/master/ServerManager.html index 756eebd..0385115 100644 --- a/devapidocs/org/apache/hadoop/hbase/master/ServerManager.html +++ b/devapidocs/org/apache/hadoop/hbase/master/ServerManager.html @@ -824,7 +824,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html? registerListener -publicvoidregisterListener(ServerListenerlistener) +publicvoidregisterListener(ServerListenerlistener) Add the listener to the notification list. Parameters: @@ -838,7 +838,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html? unregisterListener -publicbooleanunregisterListener(ServerListenerlistener) +publicbooleanunregisterListener(ServerListenerlistener) Remove the listener from the notification list. 
Parameters: @@ -852,7 +852,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html? regionServerStartup -ServerNameregionServerStartup(org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequestrequest, +ServerNameregionServerStartup(org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequestrequest, http://docs.oracle.com/javase/8/docs/api/java/net/InetAddress.html?is-external=true; title="class or interface in java.net">InetAddressia) throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true; title="class or interface in java.io">IOException Let the server manager know a new regionserver has come online @@ -873,7 +873,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html? updateLastFlushedSequenceIds -privatevoidupdateLastFlushedSequenceIds(ServerNamesn, +privatevoidupdateLastFlushedSequenceIds(ServerNamesn, ServerLoadhsl) Updates last flushed sequence Ids for the regions on server sn @@ -889,7 +889,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html? regionServerReport -publicvoidregionServerReport(ServerNamesn, +publicvoidregionServerReport(ServerNamesn, ServerLoadsl) throws YouAreDeadException @@ -904,7 +904,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html? checkAndRecordNewServer -booleancheckAndRecordNewServer(ServerNameserverName, +booleancheckAndRecordNewServer(ServerNameserverName, ServerLoadsl) Check is a server of same host and port already exists, if not, or the existed one got a smaller start code, record it. @@ -923,7 +923,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html? checkClockSkew -privatevoidcheckClockSkew(ServerNameserverName, +privatevoidcheckClockSkew(ServerNameserverName, longserverCurrentTime) throws ClockOutOfSyncException Checks if the clock skew between the server and the master. 
If the clock skew exceeds the @@ -944,7 +944,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html? checkIsDead -privatevoidcheckIsDead(ServerNameserverName, +privatevoidcheckIsDead(ServerNameserverName, http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">Stringwhat) throws YouAreDeadException If this server is on the dead list, reject it with a YouAreDeadException. @@ -965,7 +965,7 @@ extends
[32/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8118541f/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionInfo.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionInfo.html b/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionInfo.html index 3d7e662..8c97d73 100644 --- a/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionInfo.html +++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionInfo.html @@ -1049,6 +1049,16 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods. byte[]row) +private void +HBaseAdmin.flush(org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterfaceadmin, + RegionInfoinfo) + + +private http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true; title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true; title="class or interface in java.lang">Void +RawAsyncHBaseAdmin.flush(ServerNameserverName, + RegionInforegionInfo) + + static byte[] RegionInfoDisplay.getEndKeyForDisplay(RegionInfori, org.apache.hadoop.conf.Configurationconf) @@ -5023,6 +5033,16 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods. +static HRegion +HRegion.openReadOnlyFileSystemHRegion(org.apache.hadoop.conf.Configurationconf, + org.apache.hadoop.fs.FileSystemfs, + org.apache.hadoop.fs.PathtableDir, + RegionInfoinfo, + TableDescriptorhtd) +Open a Region on a read-only file-system (like hdfs snapshots) + + + static HRegionFileSystem HRegionFileSystem.openRegionFromFileSystem(org.apache.hadoop.conf.Configurationconf, org.apache.hadoop.fs.FileSystemfs, @@ -5032,12 +5052,12 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods. Open Region from file-system. 
- + void RegionCoprocessorHost.postReplayWALs(RegionInfoinfo, org.apache.hadoop.fs.Pathedits) - + void RegionCoprocessorHost.postWALRestore(RegionInfoinfo, WALKeylogKey, @@ -5048,12 +5068,12 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods. - + void RegionCoprocessorHost.preReplayWALs(RegionInfoinfo, org.apache.hadoop.fs.Pathedits) - + boolean RegionCoprocessorHost.preWALRestore(RegionInfoinfo, WALKeylogKey, @@ -5064,7 +5084,7 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods. - + static boolean HRegion.rowIsInRange(RegionInfoinfo, byte[]row) @@ -5072,14 +5092,14 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods. specified RegionInfo - + static boolean HRegion.rowIsInRange(RegionInfoinfo, byte[]row, intoffset, shortlength) - + org.apache.hadoop.fs.Path HRegionFileSystem.splitStoreFile(RegionInfohri, http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">StringfamilyName, @@ -5090,7 +5110,7 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods. Write out a split reference. - + static void HRegion.warmupHRegion(RegionInfoinfo, TableDescriptorhtd, http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8118541f/devapidocs/org/apache/hadoop/hbase/client/class-use/TableDescriptor.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/client/class-use/TableDescriptor.html b/devapidocs/org/apache/hadoop/hbase/client/class-use/TableDescriptor.html index 65e4619..f38b5f3 100644 --- a/devapidocs/org/apache/hadoop/hbase/client/class-use/TableDescriptor.html +++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/TableDescriptor.html @@ -2029,17 +2029,27 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods. 
+static HRegion +HRegion.openReadOnlyFileSystemHRegion(org.apache.hadoop.conf.Configurationconf, + org.apache.hadoop.fs.FileSystemfs, + org.apache.hadoop.fs.PathtableDir, + RegionInfoinfo, + TableDescriptorhtd) +Open a Region on a read-only file-system (like hdfs snapshots) + + + (package private) void HRegion.setTableDescriptor(TableDescriptordesc) - + static void RegionCoprocessorHost.testTableCoprocessorAttrs(org.apache.hadoop.conf.Configurationconf, TableDescriptorhtd) Sanity check the table coprocessor attributes of the supplied schema. - + static void HRegion.warmupHRegion(RegionInfoinfo,
[08/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8118541f/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.html b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.html index 5e420c2..7639257 100644 --- a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.html +++ b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.html @@ -18,7 +18,7 @@ catch(err) { } //--> -var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":9,"i43":9,"i44":9,"i45":9,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":9,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10,"i78":10,"i79":10,"i80":10,"i81":10,"i82":10,"i83":10,"i84":10,"i85":10,"i86":10,"i87":10,"i88":10,"i89":10,"i90":10,"i91":10,"i92":10,"i93":9,"i94":10,"i95":10,"i96":10,"i97":10,"i98":10,"i99":10,"i100":10,"i101":10,"i102":10,"i103":10,"i104":10,"i105":9,"i106":10,"i107":10,"i108":10,"i109":10 
,"i110":10,"i111":10,"i112":41,"i113":41,"i114":10,"i115":10,"i116":10,"i117":10,"i118":10,"i119":10,"i120":10,"i121":10,"i122":10,"i123":10,"i124":10,"i125":10,"i126":10,"i127":10,"i128":10,"i129":10,"i130":10,"i131":10,"i132":10,"i133":10,"i134":10,"i135":10,"i136":10,"i137":10,"i138":9,"i139":10,"i140":10,"i141":10,"i142":10,"i143":10,"i144":10,"i145":42,"i146":10,"i147":10,"i148":10,"i149":10,"i150":10,"i151":10,"i152":10,"i153":10,"i154":10,"i155":10,"i156":10,"i157":10,"i158":10,"i159":10,"i160":10,"i161":10,"i162":10,"i163":10,"i164":10,"i165":10,"i166":10,"i167":9,"i168":10,"i169":10,"i170":10,"i171":10,"i172":10,"i173":10,"i174":10,"i175":10,"i176":9,"i177":10,"i178":10,"i179":9,"i180":9,"i181":9,"i182":9,"i183":9,"i184":9,"i185":9,"i186":9,"i187":9,"i188":10,"i189":10,"i190":10,"i191":10,"i192":10,"i193":10,"i194":10,"i195":10,"i196":10,"i197":9,"i198":10,"i199":10,"i200":10,"i201":10,"i202":10,"i203":10,"i204":10,"i205":10,"i206":10,"i207":10,"i208":10,"i209":10,"i210":10 ,"i211":10,"i212":10,"i213":10,"i214":10,"i215":10,"i216":10,"i217":10,"i218":10,"i219":10,"i220":10,"i221":10,"i222":10,"i223":10,"i224":10,"i225":10,"i226":10,"i227":10,"i228":9,"i229":9,"i230":10,"i231":10,"i232":10,"i233":10,"i234":10,"i235":10,"i236":10,"i237":10,"i238":10,"i239":10,"i240":10,"i241":9,"i242":10,"i243":10,"i244":10,"i245":10,"i246":10,"i247":10,"i248":10,"i249":10,"i250":10,"i251":10,"i252":10,"i253":10,"i254":10,"i255":9,"i256":10,"i257":10,"i258":10,"i259":10}; +var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":9,"i43":9,"i44":9,"i45":9,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":9,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10,"i78":10,"i79":10,"i80":10,"i81":10,"i82":10,"i83":10,"i84":10,"i85":10,"i86":10,"i87":10,"i88":10,"i89":10,"i90":10,"i91":10,"i92":10,"i93":9,"i94":10,"i95":10,"i96":10,"i97":10,"i98":10,"i99":10,"i100":10,"i101":10,"i102":10,"i103":10,"i104":10,"i105":9,"i106":10,"i107":10,"i108":10,"i109":10
[17/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8118541f/devapidocs/org/apache/hadoop/hbase/master/assignment/RegionStates.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/master/assignment/RegionStates.html b/devapidocs/org/apache/hadoop/hbase/master/assignment/RegionStates.html index 1e5e269..eecc720 100644 --- a/devapidocs/org/apache/hadoop/hbase/master/assignment/RegionStates.html +++ b/devapidocs/org/apache/hadoop/hbase/master/assignment/RegionStates.html @@ -110,7 +110,7 @@ var activeTableTab = "activeTableTab"; @InterfaceAudience.Private -public class RegionStates +public class RegionStates extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true; title="class or interface in java.lang">Object RegionStates contains a set of Maps that describes the in-memory state of the AM, with the regions available in the system, the region in transition, the offline regions and @@ -513,7 +513,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html? LOG -private static finalorg.slf4j.Logger LOG +private static finalorg.slf4j.Logger LOG @@ -522,7 +522,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html? STATES_EXPECTED_ON_OPEN -protected static finalRegionState.State[] STATES_EXPECTED_ON_OPEN +protected static finalRegionState.State[] STATES_EXPECTED_ON_OPEN @@ -531,7 +531,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html? STATES_EXPECTED_ON_CLOSE -protected static finalRegionState.State[] STATES_EXPECTED_ON_CLOSE +protected static finalRegionState.State[] STATES_EXPECTED_ON_CLOSE @@ -540,7 +540,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html? 
REGION_STATE_STAMP_COMPARATOR -public static finalRegionStates.RegionStateStampComparator REGION_STATE_STAMP_COMPARATOR +public static finalRegionStates.RegionStateStampComparator REGION_STATE_STAMP_COMPARATOR @@ -549,7 +549,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html? regionsMap -private finalhttp://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ConcurrentSkipListMap.html?is-external=true; title="class or interface in java.util.concurrent">ConcurrentSkipListMapbyte[],RegionStates.RegionStateNode regionsMap +private finalhttp://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ConcurrentSkipListMap.html?is-external=true; title="class or interface in java.util.concurrent">ConcurrentSkipListMapbyte[],RegionStates.RegionStateNode regionsMap RegionName -- i.e. RegionInfo.getRegionName() -- as bytes to RegionStates.RegionStateNode @@ -559,7 +559,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html? regionInTransition -private finalhttp://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ConcurrentSkipListMap.html?is-external=true; title="class or interface in java.util.concurrent">ConcurrentSkipListMapRegionInfo,RegionStates.RegionStateNode regionInTransition +private finalhttp://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ConcurrentSkipListMap.html?is-external=true; title="class or interface in java.util.concurrent">ConcurrentSkipListMapRegionInfo,RegionStates.RegionStateNode regionInTransition @@ -568,7 +568,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html? 
regionOffline -private finalhttp://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ConcurrentSkipListMap.html?is-external=true; title="class or interface in java.util.concurrent">ConcurrentSkipListMapRegionInfo,RegionStates.RegionStateNode regionOffline +private finalhttp://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ConcurrentSkipListMap.html?is-external=true; title="class or interface in java.util.concurrent">ConcurrentSkipListMapRegionInfo,RegionStates.RegionStateNode regionOffline Regions marked as offline on a read of hbase:meta. Unused or at least, once offlined, regions have no means of coming on line again. TODO. @@ -579,7 +579,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html? regionFailedOpen -private finalhttp://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ConcurrentSkipListMap.html?is-external=true; title="class or interface in java.util.concurrent">ConcurrentSkipListMapbyte[],RegionStates.RegionFailedOpen regionFailedOpen +private finalhttp://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ConcurrentSkipListMap.html?is-external=true; title="class or interface in java.util.concurrent">ConcurrentSkipListMapbyte[],RegionStates.RegionFailedOpen regionFailedOpen @@ -588,7 +588,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html? serverMap -private finalhttp://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ConcurrentHashMap.html?is-external=true; title="class or interface in
[23/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8118541f/devapidocs/org/apache/hadoop/hbase/master/DeadServer.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/master/DeadServer.html b/devapidocs/org/apache/hadoop/hbase/master/DeadServer.html index d880bf7..3d34dcf 100644 --- a/devapidocs/org/apache/hadoop/hbase/master/DeadServer.html +++ b/devapidocs/org/apache/hadoop/hbase/master/DeadServer.html @@ -332,7 +332,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html? ServerNameDeathDateComparator -private statichttp://docs.oracle.com/javase/8/docs/api/java/util/Comparator.html?is-external=true; title="class or interface in java.util">ComparatorPairServerName,http://docs.oracle.com/javase/8/docs/api/java/lang/Long.html?is-external=true; title="class or interface in java.lang">Long ServerNameDeathDateComparator +private statichttp://docs.oracle.com/javase/8/docs/api/java/util/Comparator.html?is-external=true; title="class or interface in java.util">ComparatorPairServerName,http://docs.oracle.com/javase/8/docs/api/java/lang/Long.html?is-external=true; title="class or interface in java.lang">Long ServerNameDeathDateComparator @@ -488,7 +488,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html? toString -publichttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">StringtoString() +publichttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">StringtoString() Overrides: http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#toString--; title="class or interface in java.lang">toStringin classhttp://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true; title="class or interface in java.lang">Object @@ -501,7 +501,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html? 
copyDeadServersSince -publichttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true; title="class or interface in java.util">ListPairServerName,http://docs.oracle.com/javase/8/docs/api/java/lang/Long.html?is-external=true; title="class or interface in java.lang">LongcopyDeadServersSince(longts) +publichttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true; title="class or interface in java.util">ListPairServerName,http://docs.oracle.com/javase/8/docs/api/java/lang/Long.html?is-external=true; title="class or interface in java.lang">LongcopyDeadServersSince(longts) Extract all the servers dead since a given time, and sort them. Parameters: @@ -517,7 +517,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html? getTimeOfDeath -publichttp://docs.oracle.com/javase/8/docs/api/java/util/Date.html?is-external=true; title="class or interface in java.util">DategetTimeOfDeath(ServerNamedeadServerName) +publichttp://docs.oracle.com/javase/8/docs/api/java/util/Date.html?is-external=true; title="class or interface in java.util">DategetTimeOfDeath(ServerNamedeadServerName) Get the time when a server died Parameters: @@ -533,7 +533,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html? removeDeadServer -publicbooleanremoveDeadServer(ServerNamedeadServerName) +publicbooleanremoveDeadServer(ServerNamedeadServerName) remove the specified dead server Parameters:
[49/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8118541f/apidocs/org/apache/hadoop/hbase/client/Admin.html -- diff --git a/apidocs/org/apache/hadoop/hbase/client/Admin.html b/apidocs/org/apache/hadoop/hbase/client/Admin.html index 07001f6..f2d60a6 100644 --- a/apidocs/org/apache/hadoop/hbase/client/Admin.html +++ b/apidocs/org/apache/hadoop/hbase/client/Admin.html @@ -18,7 +18,7 @@ catch(err) { } //--> -var methods = {"i0":6,"i1":6,"i2":6,"i3":50,"i4":6,"i5":6,"i6":18,"i7":6,"i8":18,"i9":6,"i10":6,"i11":6,"i12":6,"i13":6,"i14":50,"i15":50,"i16":6,"i17":6,"i18":6,"i19":6,"i20":6,"i21":6,"i22":6,"i23":6,"i24":6,"i25":6,"i26":6,"i27":38,"i28":38,"i29":38,"i30":38,"i31":6,"i32":6,"i33":6,"i34":6,"i35":6,"i36":6,"i37":6,"i38":50,"i39":6,"i40":6,"i41":6,"i42":6,"i43":6,"i44":6,"i45":6,"i46":6,"i47":6,"i48":38,"i49":6,"i50":6,"i51":6,"i52":6,"i53":6,"i54":6,"i55":6,"i56":38,"i57":6,"i58":6,"i59":38,"i60":38,"i61":6,"i62":38,"i63":6,"i64":6,"i65":6,"i66":6,"i67":6,"i68":38,"i69":38,"i70":50,"i71":6,"i72":6,"i73":6,"i74":6,"i75":6,"i76":38,"i77":38,"i78":6,"i79":50,"i80":6,"i81":6,"i82":6,"i83":38,"i84":38,"i85":18,"i86":18,"i87":6,"i88":50,"i89":6,"i90":6,"i91":6,"i92":6,"i93":6,"i94":6,"i95":6,"i96":6,"i97":6,"i98":18,"i99":18,"i100":50,"i101":18,"i102":6,"i103":38,"i104":6,"i105":6,"i106":6,"i107":38,"i108":18,"i109":6,"i110":6,"i111":6,"i112":18,"i113":6,"i114":6,"i115":38,"i116":38,"i1 
17":38,"i118":38,"i119":6,"i120":6,"i121":6,"i122":6,"i123":6,"i124":6,"i125":6,"i126":6,"i127":6,"i128":6,"i129":50,"i130":6,"i131":38,"i132":6,"i133":6,"i134":18,"i135":6,"i136":6,"i137":6,"i138":6,"i139":6,"i140":6,"i141":6,"i142":38,"i143":6,"i144":6,"i145":6,"i146":6,"i147":6,"i148":38,"i149":6,"i150":6,"i151":6,"i152":38,"i153":38,"i154":6,"i155":38,"i156":38,"i157":38,"i158":38,"i159":38,"i160":6,"i161":38,"i162":6,"i163":6,"i164":6,"i165":6,"i166":6,"i167":6,"i168":6,"i169":38,"i170":6,"i171":6,"i172":6,"i173":50,"i174":6,"i175":6,"i176":6,"i177":6,"i178":6,"i179":38,"i180":6,"i181":38,"i182":6,"i183":6,"i184":6,"i185":6,"i186":6,"i187":6,"i188":6,"i189":6,"i190":6,"i191":6,"i192":6,"i193":6,"i194":6,"i195":6,"i196":6,"i197":6,"i198":50,"i199":6,"i200":50,"i201":50,"i202":50,"i203":6,"i204":50,"i205":6,"i206":6,"i207":6,"i208":6,"i209":6,"i210":6,"i211":6,"i212":6,"i213":38,"i214":38,"i215":6,"i216":6,"i217":6,"i218":6,"i219":6,"i220":50,"i221":6,"i222":6,"i223":6,"i224":6," i225":6,"i226":6,"i227":6}; +var methods = 
{"i0":6,"i1":6,"i2":6,"i3":50,"i4":6,"i5":6,"i6":18,"i7":6,"i8":18,"i9":6,"i10":6,"i11":6,"i12":6,"i13":6,"i14":50,"i15":50,"i16":6,"i17":6,"i18":6,"i19":6,"i20":6,"i21":6,"i22":6,"i23":6,"i24":6,"i25":6,"i26":6,"i27":38,"i28":38,"i29":38,"i30":38,"i31":6,"i32":6,"i33":6,"i34":6,"i35":6,"i36":6,"i37":6,"i38":50,"i39":6,"i40":6,"i41":6,"i42":6,"i43":6,"i44":6,"i45":6,"i46":6,"i47":6,"i48":38,"i49":6,"i50":6,"i51":6,"i52":6,"i53":6,"i54":6,"i55":6,"i56":38,"i57":6,"i58":6,"i59":38,"i60":38,"i61":6,"i62":38,"i63":6,"i64":6,"i65":6,"i66":6,"i67":6,"i68":38,"i69":38,"i70":50,"i71":6,"i72":6,"i73":6,"i74":6,"i75":6,"i76":38,"i77":38,"i78":6,"i79":50,"i80":6,"i81":6,"i82":6,"i83":6,"i84":38,"i85":38,"i86":18,"i87":18,"i88":6,"i89":50,"i90":6,"i91":6,"i92":6,"i93":6,"i94":6,"i95":6,"i96":6,"i97":6,"i98":6,"i99":18,"i100":18,"i101":50,"i102":18,"i103":6,"i104":38,"i105":6,"i106":6,"i107":6,"i108":38,"i109":18,"i110":6,"i111":6,"i112":6,"i113":18,"i114":6,"i115":6,"i116":38,"i11 7":38,"i118":38,"i119":38,"i120":6,"i121":6,"i122":6,"i123":6,"i124":6,"i125":6,"i126":6,"i127":6,"i128":6,"i129":6,"i130":50,"i131":6,"i132":38,"i133":6,"i134":6,"i135":18,"i136":6,"i137":6,"i138":6,"i139":6,"i140":6,"i141":6,"i142":6,"i143":38,"i144":6,"i145":6,"i146":6,"i147":6,"i148":6,"i149":38,"i150":6,"i151":6,"i152":6,"i153":38,"i154":38,"i155":6,"i156":38,"i157":38,"i158":38,"i159":38,"i160":38,"i161":6,"i162":38,"i163":6,"i164":6,"i165":6,"i166":6,"i167":6,"i168":6,"i169":6,"i170":38,"i171":6,"i172":6,"i173":6,"i174":50,"i175":6,"i176":6,"i177":6,"i178":6,"i179":6,"i180":38,"i181":6,"i182":38,"i183":6,"i184":6,"i185":6,"i186":6,"i187":6,"i188":6,"i189":6,"i190":6,"i191":6,"i192":6,"i193":6,"i194":6,"i195":6,"i196":6,"i197":6,"i198":6,"i199":50,"i200":6,"i201":50,"i202":50,"i203":50,"i204":6,"i205":50,"i206":6,"i207":6,"i208":6,"i209":6,"i210":6,"i211":6,"i212":6,"i213":6,"i214":38,"i215":38,"i216":6,"i217":6,"i218":6,"i219":6,"i220":6,"i221":50,"i222":6,"i223":6,"i224":6," 
i225":6,"i226":6,"i227":6,"i228":6}; var tabs = {65535:["t0","All Methods"],2:["t2","Instance Methods"],4:["t3","Abstract Methods"],16:["t5","Default Methods"],32:["t6","Deprecated Methods"]}; var altColor = "altColor"; var rowColor = "rowColor"; @@ -760,6 +760,12 @@ extends org.apache.hadoop.hbase.Abortable, http://docs.oracle.com/javas +void +flushRegionServer(ServerNameserverName) +Flush all regions on the region server. + + +
[06/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8118541f/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.html b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.html index eeffbba..404e5b0 100644 --- a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.html +++ b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.html @@ -1563,10 +1563,13 @@ implements clusterConnection -protectedClusterConnection clusterConnection +protectedClusterConnection clusterConnection Cluster connection to be shared by services. Initialized at server startup and closed when server shuts down. - Clients must never close it explicitly. + Clients must never close it explicitly. + Clients hosted by this Server should make use of this clusterConnection rather than create + their own; if they create their own, there is no way for the hosting server to shutdown + ongoing client RPCs. @@ -1575,7 +1578,7 @@ implements metaTableLocator -protectedMetaTableLocator metaTableLocator +protectedMetaTableLocator metaTableLocator @@ -1584,7 +1587,7 @@ implements tableDescriptors -protectedTableDescriptors tableDescriptors +protectedTableDescriptors tableDescriptors Go here to get table descriptors. 
@@ -1594,7 +1597,7 @@ implements replicationSourceHandler -protectedReplicationSourceService replicationSourceHandler +protectedReplicationSourceService replicationSourceHandler @@ -1603,7 +1606,7 @@ implements replicationSinkHandler -protectedReplicationSinkService replicationSinkHandler +protectedReplicationSinkService replicationSinkHandler @@ -1612,7 +1615,7 @@ implements compactSplitThread -publicCompactSplit compactSplitThread +publicCompactSplit compactSplitThread @@ -1621,7 +1624,7 @@ implements onlineRegions -protected finalhttp://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true; title="class or interface in java.util">Maphttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">String,HRegion onlineRegions +protected finalhttp://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true; title="class or interface in java.util">Maphttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">String,HRegion onlineRegions Map of regions currently being served by this region server. Key is the encoded region name. All access should be synchronized. 
@@ -1632,7 +1635,7 @@ implements regionFavoredNodesMap -protected finalhttp://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true; title="class or interface in java.util">Maphttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">String,http://docs.oracle.com/javase/8/docs/api/java/net/InetSocketAddress.html?is-external=true; title="class or interface in java.net">InetSocketAddress[] regionFavoredNodesMap +protected finalhttp://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true; title="class or interface in java.util">Maphttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">String,http://docs.oracle.com/javase/8/docs/api/java/net/InetSocketAddress.html?is-external=true; title="class or interface in java.net">InetSocketAddress[] regionFavoredNodesMap Map of encoded region names to the DataNode locations they should be hosted on We store the value as InetSocketAddress since this is used only in HDFS API (create() that takes favored nodes as hints for placing file blocks). 
@@ -1648,7 +1651,7 @@ implements leases -protectedLeases leases +protectedLeases leases @@ -1657,7 +1660,7 @@ implements executorService -protectedExecutorService executorService +protectedExecutorService executorService @@ -1666,7 +1669,7 @@ implements fsOk -protected volatileboolean fsOk +protected volatileboolean fsOk @@ -1675,7 +1678,7 @@ implements fs -protectedHFileSystem fs +protectedHFileSystem fs @@ -1684,7 +1687,7 @@ implements walFs -protectedHFileSystem walFs +protectedHFileSystem walFs @@ -1693,7 +1696,7 @@ implements stopped -private volatileboolean stopped +private volatileboolean stopped @@ -1702,7 +1705,7 @@ implements abortRequested -private volatileboolean abortRequested +private volatileboolean abortRequested @@ -1711,7 +1714,7 @@ implements rowlocks -http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ConcurrentMap.html?is-external=true; title="class or interface in java.util.concurrent">ConcurrentMaphttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in
[37/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8118541f/devapidocs/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceState.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceState.html b/devapidocs/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceState.html index 3f4880f..5b38a30 100644 --- a/devapidocs/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceState.html +++ b/devapidocs/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceState.html @@ -113,7 +113,7 @@ var activeTableTab = "activeTableTab"; -static class ConnectionImplementation.MasterServiceState +static class ConnectionImplementation.MasterServiceState extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true; title="class or interface in java.lang">Object State of the MasterService connection/setup. @@ -222,7 +222,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html? connection -Connection connection +Connection connection @@ -231,7 +231,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html? stub -org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MasterService.BlockingInterface stub +org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MasterService.BlockingInterface stub @@ -240,7 +240,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html? userCount -int userCount +int userCount @@ -257,7 +257,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html? MasterServiceState -MasterServiceState(Connectionconnection) +MasterServiceState(Connectionconnection) @@ -274,7 +274,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html? 
toString -publichttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">StringtoString() +publichttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">StringtoString() Overrides: http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#toString--; title="class or interface in java.lang">toStringin classhttp://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true; title="class or interface in java.lang">Object @@ -287,7 +287,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html? getStub -http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true; title="class or interface in java.lang">ObjectgetStub() +http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true; title="class or interface in java.lang">ObjectgetStub() @@ -296,7 +296,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html? clearStub -voidclearStub() +voidclearStub() @@ -305,7 +305,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html? 
isMasterRunning -booleanisMasterRunning() +booleanisMasterRunning() throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true; title="class or interface in java.io">IOException Throws: http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8118541f/devapidocs/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceStubMaker.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceStubMaker.html b/devapidocs/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceStubMaker.html index f5fdc63..c3447c5 100644 --- a/devapidocs/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceStubMaker.html +++ b/devapidocs/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceStubMaker.html @@ -113,7 +113,7 @@ var activeTableTab = "activeTableTab"; -private final class ConnectionImplementation.MasterServiceStubMaker +private final class ConnectionImplementation.MasterServiceStubMaker extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true; title="class or interface in java.lang">Object Class to make a MasterServiceStubMaker stub. @@ -197,7 +197,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html? MasterServiceStubMaker -privateMasterServiceStubMaker() +privateMasterServiceStubMaker() @@ -214,7 +214,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html? isMasterRunning -privatevoidisMasterRunning(org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MasterService.BlockingInterfacestub)
[31/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8118541f/devapidocs/org/apache/hadoop/hbase/coprocessor/ReadOnlyConfiguration.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/coprocessor/ReadOnlyConfiguration.html b/devapidocs/org/apache/hadoop/hbase/coprocessor/ReadOnlyConfiguration.html index 815025b..4c05ae9 100644 --- a/devapidocs/org/apache/hadoop/hbase/coprocessor/ReadOnlyConfiguration.html +++ b/devapidocs/org/apache/hadoop/hbase/coprocessor/ReadOnlyConfiguration.html @@ -255,8 +255,8 @@ extends org.apache.hadoop.conf.Configuration http://docs.oracle.com/javase/8/docs/api/java/lang/Class.html?is-external=true; title="class or interface in java.lang">Class?[] -getClasses(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">Stringname, - http://docs.oracle.com/javase/8/docs/api/java/lang/Class.html?is-external=true; title="class or interface in java.lang">Class?[]defaultValue) +getClasses(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">Stringname, + http://docs.oracle.com/javase/8/docs/api/java/lang/Class.html?is-external=true; title="class or interface in java.lang">Class?...defaultValue) http://docs.oracle.com/javase/8/docs/api/java/lang/ClassLoader.html?is-external=true; title="class or interface in java.lang">ClassLoader @@ -743,7 +743,7 @@ extends org.apache.hadoop.conf.Configuration setAllowNullValueProperties -publicvoidsetAllowNullValueProperties(booleanval) +publicvoidsetAllowNullValueProperties(booleanval) Overrides: setAllowNullValuePropertiesin classorg.apache.hadoop.conf.Configuration @@ -756,7 +756,7 @@ extends org.apache.hadoop.conf.Configuration getTrimmed -publichttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">StringgetTrimmed(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class 
or interface in java.lang">Stringname) +publichttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">StringgetTrimmed(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">Stringname) Overrides: getTrimmedin classorg.apache.hadoop.conf.Configuration @@ -769,7 +769,7 @@ extends org.apache.hadoop.conf.Configuration getTrimmed -publichttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">StringgetTrimmed(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">Stringname, +publichttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">StringgetTrimmed(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">Stringname, http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">StringdefaultValue) Overrides: @@ -783,7 +783,7 @@ extends org.apache.hadoop.conf.Configuration getRaw -publichttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">StringgetRaw(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">Stringname) +publichttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">StringgetRaw(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">Stringname) Overrides: getRawin classorg.apache.hadoop.conf.Configuration @@ -796,7 +796,7 @@ extends org.apache.hadoop.conf.Configuration set 
-publicvoidset(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">Stringname, +publicvoidset(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">Stringname, http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">Stringvalue) Overrides: @@ -810,7 +810,7 @@ extends org.apache.hadoop.conf.Configuration set -publicvoidset(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">Stringname, +publicvoidset(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">Stringname,
[02/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8118541f/devapidocs/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.html b/devapidocs/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.html index a132f94..26c39bc 100644 --- a/devapidocs/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.html +++ b/devapidocs/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.html @@ -114,7 +114,7 @@ var activeTableTab = "activeTableTab"; @InterfaceAudience.Private -class MemStoreFlusher +class MemStoreFlusher extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true; title="class or interface in java.lang">Object implements FlushRequester Thread that flushes cache on request @@ -438,7 +438,7 @@ implements LOG -private static finalorg.slf4j.Logger LOG +private static finalorg.slf4j.Logger LOG @@ -447,7 +447,7 @@ implements conf -privateorg.apache.hadoop.conf.Configuration conf +privateorg.apache.hadoop.conf.Configuration conf @@ -456,7 +456,7 @@ implements flushQueue -private finalhttp://docs.oracle.com/javase/8/docs/api/java/util/concurrent/BlockingQueue.html?is-external=true; title="class or interface in java.util.concurrent">BlockingQueueMemStoreFlusher.FlushQueueEntry flushQueue +private finalhttp://docs.oracle.com/javase/8/docs/api/java/util/concurrent/BlockingQueue.html?is-external=true; title="class or interface in java.util.concurrent">BlockingQueueMemStoreFlusher.FlushQueueEntry flushQueue @@ -465,7 +465,7 @@ implements regionsInQueue -private finalhttp://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true; title="class or interface in java.util">MapRegion,MemStoreFlusher.FlushRegionEntry regionsInQueue +private finalhttp://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true; title="class or interface in java.util">MapRegion,MemStoreFlusher.FlushRegionEntry regionsInQueue @@ -474,7 +474,7 @@ implements 
wakeupPending -privatehttp://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicBoolean.html?is-external=true; title="class or interface in java.util.concurrent.atomic">AtomicBoolean wakeupPending +privatehttp://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicBoolean.html?is-external=true; title="class or interface in java.util.concurrent.atomic">AtomicBoolean wakeupPending @@ -483,7 +483,7 @@ implements threadWakeFrequency -private finallong threadWakeFrequency +private finallong threadWakeFrequency @@ -492,7 +492,7 @@ implements server -private finalHRegionServer server +private finalHRegionServer server @@ -501,7 +501,7 @@ implements lock -private finalhttp://docs.oracle.com/javase/8/docs/api/java/util/concurrent/locks/ReentrantReadWriteLock.html?is-external=true; title="class or interface in java.util.concurrent.locks">ReentrantReadWriteLock lock +private finalhttp://docs.oracle.com/javase/8/docs/api/java/util/concurrent/locks/ReentrantReadWriteLock.html?is-external=true; title="class or interface in java.util.concurrent.locks">ReentrantReadWriteLock lock @@ -510,7 +510,7 @@ implements blockSignal -private finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true; title="class or interface in java.lang">Object blockSignal +private finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true; title="class or interface in java.lang">Object blockSignal @@ -519,7 +519,7 @@ implements blockingWaitTime -privatelong blockingWaitTime +privatelong blockingWaitTime @@ -528,7 +528,7 @@ implements updatesBlockedMsHighWater -private finalhttp://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/LongAdder.html?is-external=true; title="class or interface in java.util.concurrent.atomic">LongAdder updatesBlockedMsHighWater +private finalhttp://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/LongAdder.html?is-external=true; title="class or interface in 
java.util.concurrent.atomic">LongAdder updatesBlockedMsHighWater @@ -537,7 +537,7 @@ implements flushHandlers -private finalMemStoreFlusher.FlushHandler[] flushHandlers +private finalMemStoreFlusher.FlushHandler[] flushHandlers @@ -546,7 +546,7 @@ implements flushRequestListeners -privatehttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true; title="class or interface in java.util">ListFlushRequestListener flushRequestListeners +privatehttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true; title="class or interface in java.util">ListFlushRequestListener flushRequestListeners @@ -563,7 +563,7 @@ implements MemStoreFlusher -publicMemStoreFlusher(org.apache.hadoop.conf.Configurationconf,
[36/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8118541f/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.ProcedureFuture.WaitForStateCallable.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.ProcedureFuture.WaitForStateCallable.html b/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.ProcedureFuture.WaitForStateCallable.html index 1d3db70..58bdf6a 100644 --- a/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.ProcedureFuture.WaitForStateCallable.html +++ b/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.ProcedureFuture.WaitForStateCallable.html @@ -109,7 +109,7 @@ var activeTableTab = "activeTableTab"; -protected static interface HBaseAdmin.ProcedureFuture.WaitForStateCallable +protected static interface HBaseAdmin.ProcedureFuture.WaitForStateCallable @@ -161,7 +161,7 @@ var activeTableTab = "activeTableTab"; checkState -booleancheckState(inttries) +booleancheckState(inttries) throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true; title="class or interface in java.io">IOException Throws: @@ -175,7 +175,7 @@ var activeTableTab = "activeTableTab"; throwInterruptedException -voidthrowInterruptedException() +voidthrowInterruptedException() throws http://docs.oracle.com/javase/8/docs/api/java/io/InterruptedIOException.html?is-external=true; title="class or interface in java.io">InterruptedIOException Throws: @@ -189,7 +189,7 @@ var activeTableTab = "activeTableTab"; throwTimeoutException -voidthrowTimeoutException(longelapsed) +voidthrowTimeoutException(longelapsed) throws http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/TimeoutException.html?is-external=true; title="class or interface in java.util.concurrent">TimeoutException Throws: http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8118541f/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.ProcedureFuture.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.ProcedureFuture.html 
b/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.ProcedureFuture.html index 79a8228..0b74b88 100644 --- a/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.ProcedureFuture.html +++ b/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.ProcedureFuture.html @@ -123,7 +123,7 @@ var activeTableTab = "activeTableTab"; @InterfaceAudience.Private @InterfaceStability.Evolving -protected static class HBaseAdmin.ProcedureFutureV +protected static class HBaseAdmin.ProcedureFutureV extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true; title="class or interface in java.lang">Object implements http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/Future.html?is-external=true; title="class or interface in java.util.concurrent">FutureV Future that waits on a procedure result. @@ -328,7 +328,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/util/concurren exception -privatehttp://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ExecutionException.html?is-external=true; title="class or interface in java.util.concurrent">ExecutionException exception +privatehttp://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ExecutionException.html?is-external=true; title="class or interface in java.util.concurrent">ExecutionException exception @@ -337,7 +337,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/util/concurren procResultFound -privateboolean procResultFound +privateboolean procResultFound @@ -346,7 +346,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/util/concurren done -privateboolean done +privateboolean done @@ -355,7 +355,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/util/concurren cancelled -privateboolean cancelled +privateboolean cancelled @@ -364,7 +364,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/util/concurren result -privateV result +privateV result @@ -373,7 +373,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/util/concurren 
admin -private finalHBaseAdmin admin +private finalHBaseAdmin admin @@ -382,7 +382,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/util/concurren procId -private finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/Long.html?is-external=true; title="class or interface in java.lang">Long procId +private finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/Long.html?is-external=true; title="class or interface in java.lang">Long procId @@ -399,7 +399,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/util/concurren ProcedureFuture -publicProcedureFuture(HBaseAdminadmin, +publicProcedureFuture(HBaseAdminadmin,
[33/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8118541f/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.html b/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.html index 5e14bfd..aa35f49 100644 --- a/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.html +++ b/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.html @@ -18,7 +18,7 @@ catch(err) { } //--> -var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10,"i78":10,"i79":10,"i80":10,"i81":10,"i82":10,"i83":10,"i84":10,"i85":10,"i86":10,"i87":10,"i88":10,"i89":10,"i90":10,"i91":10,"i92":10,"i93":10,"i94":10,"i95":10,"i96":10,"i97":10,"i98":10,"i99":10,"i100":10,"i101":10,"i102":10,"i103":10,"i104":10,"i105":10,"i106":10,"i107":10,"i108":10,"i 
109":10,"i110":10,"i111":10,"i112":10,"i113":10,"i114":10,"i115":10,"i116":10,"i117":10,"i118":10,"i119":10,"i120":10,"i121":10,"i122":10,"i123":10,"i124":10,"i125":10,"i126":10,"i127":10,"i128":10,"i129":10,"i130":10,"i131":10,"i132":10,"i133":10,"i134":10,"i135":10,"i136":10,"i137":10,"i138":10,"i139":10,"i140":10,"i141":10,"i142":10,"i143":10,"i144":10,"i145":10,"i146":10,"i147":10,"i148":10,"i149":10,"i150":10,"i151":10,"i152":10,"i153":10,"i154":10,"i155":10,"i156":10,"i157":10,"i158":10,"i159":10,"i160":10,"i161":10,"i162":10,"i163":10,"i164":10,"i165":10,"i166":10,"i167":10,"i168":10,"i169":10,"i170":10,"i171":10,"i172":10}; +var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10,"i78":10,"i79":10,"i80":10,"i81":10,"i82":10,"i83":10,"i84":10,"i85":10,"i86":10,"i87":10,"i88":10,"i89":10,"i90":10,"i91":10,"i92":10,"i93":10,"i94":10,"i95":10,"i96":10,"i97":10,"i98":10,"i99":10,"i100":10,"i101":10,"i102":10,"i103":10,"i104":10,"i105":10,"i106":10,"i107":10,"i108":10,"i 
109":10,"i110":10,"i111":10,"i112":10,"i113":10,"i114":10,"i115":10,"i116":10,"i117":10,"i118":10,"i119":10,"i120":10,"i121":10,"i122":10,"i123":10,"i124":10,"i125":10,"i126":10,"i127":10,"i128":10,"i129":10,"i130":10,"i131":10,"i132":10,"i133":10,"i134":10,"i135":10,"i136":10,"i137":10,"i138":10,"i139":10,"i140":10,"i141":10,"i142":10,"i143":10,"i144":10,"i145":10,"i146":10,"i147":10,"i148":10,"i149":10,"i150":10,"i151":10,"i152":10,"i153":10,"i154":10,"i155":10,"i156":10,"i157":10,"i158":10,"i159":10,"i160":10,"i161":10,"i162":10,"i163":10,"i164":10,"i165":10,"i166":10,"i167":10,"i168":10,"i169":10,"i170":10,"i171":10,"i172":10,"i173":10,"i174":10}; var tabs = {65535:["t0","All Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]}; var altColor = "altColor"; var rowColor = "rowColor"; @@ -707,222 +707,233 @@ implements failedFuture(http://docs.oracle.com/javase/8/docs/api/java/lang/Throwable.html?is-external=true; title="class or interface in java.lang">Throwableerror) +private http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true; title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true; title="class or interface in java.lang">Void +flush(ServerNameserverName, + RegionInforegionInfo) + + http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true; title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true; title="class or interface in java.lang">Void flush(TableNametableName) Flush a table. - +
[42/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8118541f/checkstyle-aggregate.html -- diff --git a/checkstyle-aggregate.html b/checkstyle-aggregate.html index 47d1582..1d92588 100644 --- a/checkstyle-aggregate.html +++ b/checkstyle-aggregate.html @@ -7,7 +7,7 @@ - + Apache HBase Checkstyle Results @@ -286,10 +286,10 @@ Warnings Errors -3500 +3503 0 0 -18095 +17563 Files @@ -447,7 +447,7 @@ org/apache/hadoop/hbase/HBaseTestingUtility.java 0 0 -267 +266 org/apache/hadoop/hbase/HColumnDescriptor.java 0 @@ -892,7 +892,7 @@ org/apache/hadoop/hbase/TestRegionLoad.java 0 0 -2 +1 org/apache/hadoop/hbase/TestRegionRebalancing.java 0 @@ -1592,7 +1592,7 @@ org/apache/hadoop/hbase/client/ConnectionImplementation.java 0 0 -7 +9 org/apache/hadoop/hbase/client/ConnectionUtils.java 0 @@ -1742,7 +1742,7 @@ org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java 0 0 -89 +88 org/apache/hadoop/hbase/client/RegionAdminServiceCallable.java 0 @@ -2004,360 +2004,350 @@ 0 17 -org/apache/hadoop/hbase/client/TestAsyncSnapshotAdminApi.java -0 -0 -1 - org/apache/hadoop/hbase/client/TestAsyncTable.java 0 0 1 - + org/apache/hadoop/hbase/client/TestAsyncTableScanMetrics.java 0 0 2 - + org/apache/hadoop/hbase/client/TestAsyncTableScanner.java 0 0 1 - + org/apache/hadoop/hbase/client/TestAttributes.java 0 0 6 - + org/apache/hadoop/hbase/client/TestAvoidCellReferencesIntoShippedBlocks.java 0 0 3 - + org/apache/hadoop/hbase/client/TestBlockEvictionFromClient.java 0 0 5 - + org/apache/hadoop/hbase/client/TestBufferedMutator.java 0 0 1 - + org/apache/hadoop/hbase/client/TestCIBadHostname.java 0 0 1 - + org/apache/hadoop/hbase/client/TestCheckAndMutate.java 0 0 3 - + org/apache/hadoop/hbase/client/TestClientExponentialBackoff.java 0 0 4 - + org/apache/hadoop/hbase/client/TestClientNoCluster.java 0 0 33 - + org/apache/hadoop/hbase/client/TestClientOperationInterrupt.java 0 0 1 - + org/apache/hadoop/hbase/client/TestClientPushback.java 0 0 9 - + org/apache/hadoop/hbase/client/TestClientScanner.java 
0 0 11 - + org/apache/hadoop/hbase/client/TestClientScannerRPCTimeout.java 0 0 2 - + org/apache/hadoop/hbase/client/TestClientTimeouts.java 0 0 4 - + org/apache/hadoop/hbase/client/TestCloneSnapshotFromClient.java 0 0 6 - + org/apache/hadoop/hbase/client/TestColumnFamilyDescriptorBuilder.java 0 0 1 - + org/apache/hadoop/hbase/client/TestConnectionImplementation.java 0 0 15 - + org/apache/hadoop/hbase/client/TestConnectionUtils.java 0 0 2 - + org/apache/hadoop/hbase/client/TestDropTimeoutRequest.java 0 0 2 - + org/apache/hadoop/hbase/client/TestEnableTable.java 0 0 5 - + org/apache/hadoop/hbase/client/TestFastFail.java 0 0 7 - + org/apache/hadoop/hbase/client/TestFromClientSide.java 0 0 -155 - +71 + org/apache/hadoop/hbase/client/TestFromClientSide3.java 0 0 -24 - +20 + org/apache/hadoop/hbase/client/TestFromClientSideNoCodec.java 0 0 2 - + org/apache/hadoop/hbase/client/TestFromClientSideScanExcpetion.java 0 0 2 - + org/apache/hadoop/hbase/client/TestFromClientSideWithCoprocessor.java 0 0 1 - + org/apache/hadoop/hbase/client/TestGet.java 0 0 5 - + org/apache/hadoop/hbase/client/TestHBaseAdminNoCluster.java 0 0 11 - + org/apache/hadoop/hbase/client/TestHTableMultiplexer.java 0 0 5 - + org/apache/hadoop/hbase/client/TestHTableMultiplexerFlushCache.java 0 0 3 - + org/apache/hadoop/hbase/client/TestHTableMultiplexerViaMocks.java 0 0 2 - + org/apache/hadoop/hbase/client/TestImmutableHColumnDescriptor.java 0 0 1 - + org/apache/hadoop/hbase/client/TestImmutableHRegionInfo.java 0 0 1 - + org/apache/hadoop/hbase/client/TestImmutableHTableDescriptor.java 0 0 2 - + org/apache/hadoop/hbase/client/TestIncrementsFromClientSide.java 0 0 2 - + org/apache/hadoop/hbase/client/TestIntraRowPagination.java 0 0 1 - + org/apache/hadoop/hbase/client/TestLeaseRenewal.java 0 0 4 - + org/apache/hadoop/hbase/client/TestLimitedScanWithFilter.java 0 0 1 - + org/apache/hadoop/hbase/client/TestMetaCache.java 0 0 17 - + org/apache/hadoop/hbase/client/TestMetaWithReplicas.java 0 0 9 - + 
org/apache/hadoop/hbase/client/TestMetricsConnection.java 0 0 7 - + org/apache/hadoop/hbase/client/TestMobCloneSnapshotFromClient.java 0 0 1 - + org/apache/hadoop/hbase/client/TestMobSnapshotCloneIndependence.java 0 0 1 - + org/apache/hadoop/hbase/client/TestMultiParallel.java 0 0 9 - + org/apache/hadoop/hbase/client/TestMultipleTimestamps.java 0 0 -32 - +30 + org/apache/hadoop/hbase/client/TestOperation.java 0 0 11 - + org/apache/hadoop/hbase/client/TestProcedureFuture.java 0 0 1 - + org/apache/hadoop/hbase/client/TestPutDeleteEtcCellIteration.java 0 0 1 - + org/apache/hadoop/hbase/client/TestPutWithDelete.java 0 0 4 - +
[40/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8118541f/devapidocs/org/apache/hadoop/hbase/client/Admin.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/client/Admin.html b/devapidocs/org/apache/hadoop/hbase/client/Admin.html index eb12d14..2d7cb9a 100644 --- a/devapidocs/org/apache/hadoop/hbase/client/Admin.html +++ b/devapidocs/org/apache/hadoop/hbase/client/Admin.html @@ -18,7 +18,7 @@ catch(err) { } //--> -var methods = {"i0":6,"i1":6,"i2":6,"i3":50,"i4":6,"i5":6,"i6":18,"i7":6,"i8":18,"i9":6,"i10":6,"i11":6,"i12":6,"i13":6,"i14":50,"i15":50,"i16":6,"i17":6,"i18":6,"i19":6,"i20":6,"i21":6,"i22":6,"i23":6,"i24":6,"i25":6,"i26":6,"i27":38,"i28":38,"i29":38,"i30":38,"i31":6,"i32":6,"i33":6,"i34":6,"i35":6,"i36":6,"i37":6,"i38":50,"i39":6,"i40":6,"i41":6,"i42":6,"i43":6,"i44":6,"i45":6,"i46":6,"i47":6,"i48":38,"i49":6,"i50":6,"i51":6,"i52":6,"i53":6,"i54":6,"i55":6,"i56":38,"i57":6,"i58":6,"i59":38,"i60":38,"i61":6,"i62":38,"i63":6,"i64":6,"i65":6,"i66":6,"i67":6,"i68":38,"i69":38,"i70":50,"i71":6,"i72":6,"i73":6,"i74":6,"i75":6,"i76":38,"i77":38,"i78":6,"i79":50,"i80":6,"i81":6,"i82":6,"i83":38,"i84":38,"i85":18,"i86":18,"i87":6,"i88":50,"i89":6,"i90":6,"i91":6,"i92":6,"i93":6,"i94":6,"i95":6,"i96":6,"i97":6,"i98":18,"i99":18,"i100":50,"i101":18,"i102":6,"i103":38,"i104":6,"i105":6,"i106":6,"i107":38,"i108":18,"i109":6,"i110":6,"i111":6,"i112":18,"i113":6,"i114":6,"i115":38,"i116":38,"i1 
17":38,"i118":38,"i119":6,"i120":6,"i121":6,"i122":6,"i123":6,"i124":6,"i125":6,"i126":6,"i127":6,"i128":6,"i129":50,"i130":6,"i131":38,"i132":6,"i133":6,"i134":18,"i135":6,"i136":6,"i137":6,"i138":6,"i139":6,"i140":6,"i141":6,"i142":38,"i143":6,"i144":6,"i145":6,"i146":6,"i147":6,"i148":38,"i149":6,"i150":6,"i151":6,"i152":38,"i153":38,"i154":6,"i155":38,"i156":38,"i157":38,"i158":38,"i159":38,"i160":6,"i161":38,"i162":6,"i163":6,"i164":6,"i165":6,"i166":6,"i167":6,"i168":6,"i169":38,"i170":6,"i171":6,"i172":6,"i173":50,"i174":6,"i175":6,"i176":6,"i177":6,"i178":6,"i179":38,"i180":6,"i181":38,"i182":6,"i183":6,"i184":6,"i185":6,"i186":6,"i187":6,"i188":6,"i189":6,"i190":6,"i191":6,"i192":6,"i193":6,"i194":6,"i195":6,"i196":6,"i197":6,"i198":50,"i199":6,"i200":50,"i201":50,"i202":50,"i203":6,"i204":50,"i205":6,"i206":6,"i207":6,"i208":6,"i209":6,"i210":6,"i211":6,"i212":6,"i213":38,"i214":38,"i215":6,"i216":6,"i217":6,"i218":6,"i219":6,"i220":50,"i221":6,"i222":6,"i223":6,"i224":6," i225":6,"i226":6,"i227":6}; +var methods = 
{"i0":6,"i1":6,"i2":6,"i3":50,"i4":6,"i5":6,"i6":18,"i7":6,"i8":18,"i9":6,"i10":6,"i11":6,"i12":6,"i13":6,"i14":50,"i15":50,"i16":6,"i17":6,"i18":6,"i19":6,"i20":6,"i21":6,"i22":6,"i23":6,"i24":6,"i25":6,"i26":6,"i27":38,"i28":38,"i29":38,"i30":38,"i31":6,"i32":6,"i33":6,"i34":6,"i35":6,"i36":6,"i37":6,"i38":50,"i39":6,"i40":6,"i41":6,"i42":6,"i43":6,"i44":6,"i45":6,"i46":6,"i47":6,"i48":38,"i49":6,"i50":6,"i51":6,"i52":6,"i53":6,"i54":6,"i55":6,"i56":38,"i57":6,"i58":6,"i59":38,"i60":38,"i61":6,"i62":38,"i63":6,"i64":6,"i65":6,"i66":6,"i67":6,"i68":38,"i69":38,"i70":50,"i71":6,"i72":6,"i73":6,"i74":6,"i75":6,"i76":38,"i77":38,"i78":6,"i79":50,"i80":6,"i81":6,"i82":6,"i83":6,"i84":38,"i85":38,"i86":18,"i87":18,"i88":6,"i89":50,"i90":6,"i91":6,"i92":6,"i93":6,"i94":6,"i95":6,"i96":6,"i97":6,"i98":6,"i99":18,"i100":18,"i101":50,"i102":18,"i103":6,"i104":38,"i105":6,"i106":6,"i107":6,"i108":38,"i109":18,"i110":6,"i111":6,"i112":6,"i113":18,"i114":6,"i115":6,"i116":38,"i11 7":38,"i118":38,"i119":38,"i120":6,"i121":6,"i122":6,"i123":6,"i124":6,"i125":6,"i126":6,"i127":6,"i128":6,"i129":6,"i130":50,"i131":6,"i132":38,"i133":6,"i134":6,"i135":18,"i136":6,"i137":6,"i138":6,"i139":6,"i140":6,"i141":6,"i142":6,"i143":38,"i144":6,"i145":6,"i146":6,"i147":6,"i148":6,"i149":38,"i150":6,"i151":6,"i152":6,"i153":38,"i154":38,"i155":6,"i156":38,"i157":38,"i158":38,"i159":38,"i160":38,"i161":6,"i162":38,"i163":6,"i164":6,"i165":6,"i166":6,"i167":6,"i168":6,"i169":6,"i170":38,"i171":6,"i172":6,"i173":6,"i174":50,"i175":6,"i176":6,"i177":6,"i178":6,"i179":6,"i180":38,"i181":6,"i182":38,"i183":6,"i184":6,"i185":6,"i186":6,"i187":6,"i188":6,"i189":6,"i190":6,"i191":6,"i192":6,"i193":6,"i194":6,"i195":6,"i196":6,"i197":6,"i198":6,"i199":50,"i200":6,"i201":50,"i202":50,"i203":50,"i204":6,"i205":50,"i206":6,"i207":6,"i208":6,"i209":6,"i210":6,"i211":6,"i212":6,"i213":6,"i214":38,"i215":38,"i216":6,"i217":6,"i218":6,"i219":6,"i220":6,"i221":50,"i222":6,"i223":6,"i224":6," 
i225":6,"i226":6,"i227":6,"i228":6}; var tabs = {65535:["t0","All Methods"],2:["t2","Instance Methods"],4:["t3","Abstract Methods"],16:["t5","Default Methods"],32:["t6","Deprecated Methods"]}; var altColor = "altColor"; var rowColor = "rowColor"; @@ -764,6 +764,12 @@ extends +void +flushRegionServer(ServerNameserverName) +Flush all regions on the region server. + + +
[35/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8118541f/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.html b/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.html index 824976f..46cd36e 100644 --- a/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.html +++ b/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.html @@ -18,7 +18,7 @@ catch(err) { } //--> -var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":9,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":42,"i28":42,"i29":42,"i30":42,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":42,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10,"i78":10,"i79":10,"i80":10,"i81":10,"i82":9,"i83":10,"i84":10,"i85":9,"i86":10,"i87":10,"i88":10,"i89":10,"i90":10,"i91":10,"i92":10,"i93":10,"i94":10,"i95":41,"i96":10,"i97":10,"i98":10,"i99":10,"i100":42,"i101":10,"i102":10,"i103":10,"i104":10,"i105":10,"i106":10,"i107":10,"i108":10,"i109 
":10,"i110":10,"i111":10,"i112":10,"i113":10,"i114":10,"i115":9,"i116":10,"i117":10,"i118":10,"i119":42,"i120":10,"i121":10,"i122":10,"i123":10,"i124":10,"i125":10,"i126":10,"i127":10,"i128":10,"i129":10,"i130":10,"i131":10,"i132":10,"i133":10,"i134":10,"i135":10,"i136":10,"i137":10,"i138":10,"i139":10,"i140":10,"i141":10,"i142":10,"i143":10,"i144":10,"i145":10,"i146":10,"i147":10,"i148":10,"i149":10,"i150":10,"i151":10,"i152":10,"i153":10,"i154":10,"i155":10,"i156":10,"i157":10,"i158":10,"i159":10,"i160":10,"i161":10,"i162":10,"i163":10,"i164":10,"i165":10,"i166":10,"i167":10,"i168":10,"i169":10,"i170":10,"i171":42,"i172":10,"i173":10,"i174":10,"i175":10,"i176":10,"i177":10,"i178":10,"i179":10,"i180":10,"i181":10,"i182":10,"i183":10,"i184":10,"i185":10,"i186":10,"i187":10,"i188":10,"i189":10,"i190":10,"i191":10,"i192":10,"i193":10,"i194":10,"i195":10,"i196":10,"i197":10,"i198":42,"i199":10,"i200":10,"i201":10,"i202":10,"i203":10,"i204":10,"i205":10,"i206":10,"i207":10,"i208":10,"i2 09":10,"i210":10,"i211":10,"i212":10,"i213":10,"i214":10,"i215":10,"i216":10,"i217":10,"i218":10,"i219":10,"i220":10,"i221":10,"i222":10,"i223":10,"i224":10,"i225":10,"i226":10,"i227":10,"i228":10,"i229":10,"i230":10}; +var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":9,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":42,"i28":42,"i29":42,"i30":42,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":42,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10,"i78":10,"i79":10,"i80":10,"i81":10,"i82":9,"i83":10,"i84":10,"i85":10,"i86":10,"i87":9,"i88":10,"i89":10,"i90":10,"i91":10,"i92":10,"i93":10,"i94":10,"i95":10,"i96":10,"i97":41,"i98":10,"i99":10,"i100":10,"i101":10,"i102":42,"i103":10,"i104":10,"i105":10,"i106":10,"i107":10,"i108":10,"i109 ":10,"i110":10,"i111":10,"i112":10,"i113":10,"i114":10,"i115":10,"i116":10,"i117":9,"i118":10,"i119":10,"i120":10,"i121":42,"i122":10,"i123":10,"i124":10,"i125":10,"i126":10,"i127":10,"i128":10,"i129":10,"i130":10,"i131":10,"i132":10,"i133":10,"i134":10,"i135":10,"i136":10,"i137":10,"i138":10,"i139":10,"i140":10,"i141":10,"i142":10,"i143":10,"i144":10,"i145":10,"i146":10,"i147":10,"i148":10,"i149":10,"i150":10,"i151":10,"i152":10,"i153":10,"i154":10,"i155":10,"i156":10,"i157":10,"i158":10,"i159":10,"i160":10,"i161":10,"i162":10,"i163":10,"i164":10,"i165":10,"i166":10,"i167":10,"i168":10,"i169":10,"i170":10,"i171":10,"i172":10,"i173":42,"i174":10,"i175":10,"i176":10,"i177":10,"i178":10,"i179":10,"i180":10,"i181":10,"i182":10,"i183":10,"i184":10,"i185":10,"i186":10,"i187":10,"i188":10,"i189":10,"i190":10,"i191":10,"i192":10,"i193":10,"i194":10,"i195":10,"i196":10,"i197":10,"i198":10,"i199":10,"i200":42,"i201":10,"i202":10,"i203":10,"i204":10,"i205":10,"i206":10,"i207":10,"i208":10,"i2 
09":10,"i210":10,"i211":10,"i212":10,"i213":10,"i214":10,"i215":10,"i216":10,"i217":10,"i218":10,"i219":10,"i220":10,"i221":10,"i222":10,"i223":10,"i224":10,"i225":10,"i226":10,"i227":10,"i228":10,"i229":10,"i230":10,"i231":10,"i232":10};
[10/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8118541f/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html index 9ebdf9a..3c63532 100644 --- a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html +++ b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html @@ -121,7 +121,7 @@ var activeTableTab = "activeTableTab"; -class HRegion.RegionScannerImpl +class HRegion.RegionScannerImpl extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true; title="class or interface in java.lang">Object implements RegionScanner, Shipper, RpcCallback RegionScannerImpl is used to combine scanners from multiple Stores (aka column families). @@ -425,7 +425,7 @@ implements storeHeap -KeyValueHeap storeHeap +KeyValueHeap storeHeap @@ -434,7 +434,7 @@ implements joinedHeap -KeyValueHeap joinedHeap +KeyValueHeap joinedHeap Heap of key-values that are not essential for the provided filters and are thus read on demand, if on-demand column family loading is enabled. @@ -445,7 +445,7 @@ implements joinedContinuationRow -protectedCell joinedContinuationRow +protectedCell joinedContinuationRow If the joined heap data gathering is interrupted due to scan limits, this will contain the row for which we are populating the values. 
@@ -456,7 +456,7 @@ implements filterClosed -privateboolean filterClosed +privateboolean filterClosed @@ -465,7 +465,7 @@ implements stopRow -protected finalbyte[] stopRow +protected finalbyte[] stopRow @@ -474,7 +474,7 @@ implements includeStopRow -protected finalboolean includeStopRow +protected finalboolean includeStopRow @@ -483,7 +483,7 @@ implements region -protected finalHRegion region +protected finalHRegion region @@ -492,7 +492,7 @@ implements comparator -protected finalCellComparator comparator +protected finalCellComparator comparator @@ -501,7 +501,7 @@ implements readPt -private finallong readPt +private finallong readPt @@ -510,7 +510,7 @@ implements maxResultSize -private finallong maxResultSize +private finallong maxResultSize @@ -519,7 +519,7 @@ implements defaultScannerContext -private finalScannerContext defaultScannerContext +private finalScannerContext defaultScannerContext @@ -528,7 +528,7 @@ implements filter -private finalFilterWrapper filter +private finalFilterWrapper filter @@ -545,7 +545,7 @@ implements RegionScannerImpl -RegionScannerImpl(Scanscan, +RegionScannerImpl(Scanscan, http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true; title="class or interface in java.util">ListKeyValueScanneradditionalScanners, HRegionregion) throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true; title="class or interface in java.io">IOException @@ -561,7 +561,7 @@ implements RegionScannerImpl -RegionScannerImpl(Scanscan, +RegionScannerImpl(Scanscan, http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true; title="class or interface in java.util">ListKeyValueScanneradditionalScanners, HRegionregion, longnonceGroup, @@ -587,7 +587,7 @@ implements getRegionInfo -publicRegionInfogetRegionInfo() +publicRegionInfogetRegionInfo() Specified by: getRegionInfoin interfaceRegionScanner @@ -602,7 +602,7 @@ implements initializeScanners -protectedvoidinitializeScanners(Scanscan, 
+protectedvoidinitializeScanners(Scanscan, http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true; title="class or interface in java.util">ListKeyValueScanneradditionalScanners) throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true; title="class or interface in java.io">IOException @@ -617,7 +617,7 @@ implements initializeKVHeap -protectedvoidinitializeKVHeap(http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true; title="class or interface in java.util">ListKeyValueScannerscanners, +protectedvoidinitializeKVHeap(http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true; title="class or interface in java.util">ListKeyValueScannerscanners, http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true; title="class or interface in java.util">ListKeyValueScannerjoinedScanners, HRegionregion) throws
[43/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8118541f/apidocs/src-html/org/apache/hadoop/hbase/snapshot/SnapshotInfo.html -- diff --git a/apidocs/src-html/org/apache/hadoop/hbase/snapshot/SnapshotInfo.html b/apidocs/src-html/org/apache/hadoop/hbase/snapshot/SnapshotInfo.html index f999cbd..de398a3 100644 --- a/apidocs/src-html/org/apache/hadoop/hbase/snapshot/SnapshotInfo.html +++ b/apidocs/src-html/org/apache/hadoop/hbase/snapshot/SnapshotInfo.html @@ -308,412 +308,414 @@ 300 boolean inArchive = false; 301 long size = -1; 302 try { -303if ((inArchive = fs.exists(link.getArchivePath( { -304 size = fs.getFileStatus(link.getArchivePath()).getLen(); -305 hfilesArchiveSize.addAndGet(size); -306 hfilesArchiveCount.incrementAndGet(); -307 -308 // If store file is not shared with other snapshots and tables, -309 // increase nonSharedHfilesArchiveSize -310 if ((filesMap != null) -311 !isArchivedFileStillReferenced(link.getArchivePath(), filesMap)) { -312 nonSharedHfilesArchiveSize.addAndGet(size); -313 } -314} else if (inArchive = fs.exists(link.getMobPath())) { -315 size = fs.getFileStatus(link.getMobPath()).getLen(); -316 hfilesMobSize.addAndGet(size); -317 hfilesMobCount.incrementAndGet(); -318} else { -319 size = link.getFileStatus(fs).getLen(); -320 hfilesSize.addAndGet(size); -321 hfilesCount.incrementAndGet(); -322} -323isCorrupted = (storeFile.hasFileSize() storeFile.getFileSize() != size); -324if (isCorrupted) hfilesCorrupted.incrementAndGet(); -325 } catch (FileNotFoundException e) { -326 hfilesMissing.incrementAndGet(); -327 } -328 return new FileInfo(inArchive, size, isCorrupted); -329} -330 -331/** -332 * Add the specified log file to the stats -333 * @param server server name -334 * @param logfile log file name -335 * @return the log information -336 */ -337FileInfo addLogFile(final String server, final String logfile) throws IOException { -338 WALLink logLink = new WALLink(conf, server, logfile); -339 long size = -1; -340 try { -341size = 
logLink.getFileStatus(fs).getLen(); -342logSize.addAndGet(size); -343logsCount.incrementAndGet(); -344 } catch (FileNotFoundException e) { -345logsMissing.incrementAndGet(); -346 } -347 return new FileInfo(false, size, false); -348} -349 } -350 -351 private FileSystem fs; -352 private Path rootDir; -353 -354 private SnapshotManifest snapshotManifest; +303if (fs.exists(link.getArchivePath())) { +304 inArchive = true; +305 size = fs.getFileStatus(link.getArchivePath()).getLen(); +306 hfilesArchiveSize.addAndGet(size); +307 hfilesArchiveCount.incrementAndGet(); +308 +309 // If store file is not shared with other snapshots and tables, +310 // increase nonSharedHfilesArchiveSize +311 if ((filesMap != null) +312 !isArchivedFileStillReferenced(link.getArchivePath(), filesMap)) { +313 nonSharedHfilesArchiveSize.addAndGet(size); +314 } +315} else if (fs.exists(link.getMobPath())) { +316 inArchive = true; +317 size = fs.getFileStatus(link.getMobPath()).getLen(); +318 hfilesMobSize.addAndGet(size); +319 hfilesMobCount.incrementAndGet(); +320} else { +321 size = link.getFileStatus(fs).getLen(); +322 hfilesSize.addAndGet(size); +323 hfilesCount.incrementAndGet(); +324} +325isCorrupted = (storeFile.hasFileSize() storeFile.getFileSize() != size); +326if (isCorrupted) hfilesCorrupted.incrementAndGet(); +327 } catch (FileNotFoundException e) { +328 hfilesMissing.incrementAndGet(); +329 } +330 return new FileInfo(inArchive, size, isCorrupted); +331} +332 +333/** +334 * Add the specified log file to the stats +335 * @param server server name +336 * @param logfile log file name +337 * @return the log information +338 */ +339FileInfo addLogFile(final String server, final String logfile) throws IOException { +340 WALLink logLink = new WALLink(conf, server, logfile); +341 long size = -1; +342 try { +343size = logLink.getFileStatus(fs).getLen(); +344logSize.addAndGet(size); +345logsCount.incrementAndGet(); +346 } catch (FileNotFoundException e) { +347logsMissing.incrementAndGet(); +348 } 
+349 return new FileInfo(false, size, false); +350} +351 } +352 +353 private FileSystem fs; +354 private Path rootDir; 355 -356 private boolean listSnapshots = false; -357 private String snapshotName; -358
[24/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8118541f/devapidocs/org/apache/hadoop/hbase/io/hfile/bucket/CachedEntryQueue.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/io/hfile/bucket/CachedEntryQueue.html b/devapidocs/org/apache/hadoop/hbase/io/hfile/bucket/CachedEntryQueue.html index 1566a58..93e57a9 100644 --- a/devapidocs/org/apache/hadoop/hbase/io/hfile/bucket/CachedEntryQueue.html +++ b/devapidocs/org/apache/hadoop/hbase/io/hfile/bucket/CachedEntryQueue.html @@ -290,7 +290,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html? add -publicvoidadd(http://docs.oracle.com/javase/8/docs/api/java/util/Map.Entry.html?is-external=true; title="class or interface in java.util">Map.EntryBlockCacheKey,BucketCache.BucketEntryentry) +publicvoidadd(http://docs.oracle.com/javase/8/docs/api/java/util/Map.Entry.html?is-external=true; title="class or interface in java.util">Map.EntryBlockCacheKey,BucketCache.BucketEntryentry) Attempt to add the specified entry to this queue. If the queue is smaller than the max size, or if the specified element is @@ -308,7 +308,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html? poll -publichttp://docs.oracle.com/javase/8/docs/api/java/util/Map.Entry.html?is-external=true; title="class or interface in java.util">Map.EntryBlockCacheKey,BucketCache.BucketEntrypoll() +publichttp://docs.oracle.com/javase/8/docs/api/java/util/Map.Entry.html?is-external=true; title="class or interface in java.util">Map.EntryBlockCacheKey,BucketCache.BucketEntrypoll() Returns: The next element in this queue, or null if the queue is @@ -322,7 +322,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html? 
pollLast -publichttp://docs.oracle.com/javase/8/docs/api/java/util/Map.Entry.html?is-external=true; title="class or interface in java.util">Map.EntryBlockCacheKey,BucketCache.BucketEntrypollLast() +publichttp://docs.oracle.com/javase/8/docs/api/java/util/Map.Entry.html?is-external=true; title="class or interface in java.util">Map.EntryBlockCacheKey,BucketCache.BucketEntrypollLast() Returns: The last element in this queue, or null if the queue is @@ -336,7 +336,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html? cacheSize -publiclongcacheSize() +publiclongcacheSize() Total size of all elements in this queue. Returns: http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8118541f/devapidocs/org/apache/hadoop/hbase/io/hfile/package-tree.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/io/hfile/package-tree.html b/devapidocs/org/apache/hadoop/hbase/io/hfile/package-tree.html index 0d84b3f..5c55313 100644 --- a/devapidocs/org/apache/hadoop/hbase/io/hfile/package-tree.html +++ b/devapidocs/org/apache/hadoop/hbase/io/hfile/package-tree.html @@ -273,12 +273,12 @@ java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true; title="class or interface in java.lang">EnumE (implements java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true; title="class or interface in java.lang">ComparableT, java.io.http://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true; title="class or interface in java.io">Serializable) -org.apache.hadoop.hbase.io.hfile.HFileBlock.Writer.State -org.apache.hadoop.hbase.io.hfile.CacheConfig.ExternalBlockCaches -org.apache.hadoop.hbase.io.hfile.BlockPriority org.apache.hadoop.hbase.io.hfile.BlockType.BlockCategory +org.apache.hadoop.hbase.io.hfile.CacheConfig.ExternalBlockCaches org.apache.hadoop.hbase.io.hfile.BlockType org.apache.hadoop.hbase.io.hfile.Cacheable.MemoryType +org.apache.hadoop.hbase.io.hfile.HFileBlock.Writer.State 
+org.apache.hadoop.hbase.io.hfile.BlockPriority http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8118541f/devapidocs/org/apache/hadoop/hbase/ipc/FastPathBalancedQueueRpcExecutor.FastPathHandler.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/ipc/FastPathBalancedQueueRpcExecutor.FastPathHandler.html b/devapidocs/org/apache/hadoop/hbase/ipc/FastPathBalancedQueueRpcExecutor.FastPathHandler.html index aad02e1..0e1958d 100644 --- a/devapidocs/org/apache/hadoop/hbase/ipc/FastPathBalancedQueueRpcExecutor.FastPathHandler.html +++ b/devapidocs/org/apache/hadoop/hbase/ipc/FastPathBalancedQueueRpcExecutor.FastPathHandler.html @@ -330,7 +330,7 @@ extends getCallRunner -protectedCallRunnergetCallRunner() +protectedCallRunnergetCallRunner() throws http://docs.oracle.com/javase/8/docs/api/java/lang/InterruptedException.html?is-external=true; title="class or interface in java.lang">InterruptedException Overrides: @@ -348,7 +348,7 @@ extends loadCallRunner
[22/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8118541f/devapidocs/org/apache/hadoop/hbase/master/HMaster.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/master/HMaster.html b/devapidocs/org/apache/hadoop/hbase/master/HMaster.html index 5f95040..0bc5dcd 100644 --- a/devapidocs/org/apache/hadoop/hbase/master/HMaster.html +++ b/devapidocs/org/apache/hadoop/hbase/master/HMaster.html @@ -492,8 +492,8 @@ implements void -abort(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">Stringmsg, - http://docs.oracle.com/javase/8/docs/api/java/lang/Throwable.html?is-external=true; title="class or interface in java.lang">Throwablet) +abort(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">Stringreason, + http://docs.oracle.com/javase/8/docs/api/java/lang/Throwable.html?is-external=true; title="class or interface in java.lang">Throwablecause) Cause the server to exit without closing the regions it is serving, the log it is using and without notifying the master. @@ -1305,7 +1305,9 @@ implements void -shutdown() +shutdown() +Shutdown the cluster. + long @@ -3478,8 +3480,8 @@ implements abort -publicvoidabort(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">Stringmsg, - http://docs.oracle.com/javase/8/docs/api/java/lang/Throwable.html?is-external=true; title="class or interface in java.lang">Throwablet) +publicvoidabort(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">Stringreason, + http://docs.oracle.com/javase/8/docs/api/java/lang/Throwable.html?is-external=true; title="class or interface in java.lang">Throwablecause) Description copied from class:HRegionServer Cause the server to exit without closing the regions it is serving, the log it is using and without notifying the master. 
Used unit testing and on @@ -3490,8 +3492,8 @@ implements Overrides: abortin classHRegionServer Parameters: -msg - the reason we are aborting -t - the exception that caused the abort, or null +reason - the reason we are aborting +cause - the exception that caused the abort, or null @@ -3501,7 +3503,7 @@ implements getZooKeeper -publicZKWatchergetZooKeeper() +publicZKWatchergetZooKeeper() Description copied from interface:Server Gets the ZooKeeper instance for this server. @@ -3518,7 +3520,7 @@ implements getMasterCoprocessorHost -publicMasterCoprocessorHostgetMasterCoprocessorHost() +publicMasterCoprocessorHostgetMasterCoprocessorHost() Specified by: getMasterCoprocessorHostin interfaceMasterServices @@ -3533,7 +3535,7 @@ implements getMasterQuotaManager -publicMasterQuotaManagergetMasterQuotaManager() +publicMasterQuotaManagergetMasterQuotaManager() Specified by: getMasterQuotaManagerin interfaceMasterServices @@ -3548,7 +3550,7 @@ implements getMasterProcedureExecutor -publicProcedureExecutorMasterProcedureEnvgetMasterProcedureExecutor() +publicProcedureExecutorMasterProcedureEnvgetMasterProcedureExecutor() Specified by: getMasterProcedureExecutorin interfaceMasterServices @@ -3563,7 +3565,7 @@ implements getServerName -publicServerNamegetServerName() +publicServerNamegetServerName() Specified by: getServerNamein interfaceServer @@ -3580,7 +3582,7 @@ implements getAssignmentManager -publicAssignmentManagergetAssignmentManager() +publicAssignmentManagergetAssignmentManager() Specified by: getAssignmentManagerin interfaceMasterServices @@ -3595,7 +3597,7 @@ implements getCatalogJanitor -publicCatalogJanitorgetCatalogJanitor() +publicCatalogJanitorgetCatalogJanitor() Specified by: getCatalogJanitorin interfaceMasterServices @@ -3610,7 +3612,7 @@ implements getRegionServerFatalLogBuffer -publicMemoryBoundedLogMessageBuffergetRegionServerFatalLogBuffer() +publicMemoryBoundedLogMessageBuffergetRegionServerFatalLogBuffer() @@ -3619,8 +3621,10 @@ implements shutdown 
-publicvoidshutdown() +publicvoidshutdown() throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true; title="class or interface in java.io">IOException +Shutdown the cluster. + Master runs a coordinated stop of all RegionServers and then itself. Throws: http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true; title="class or interface in java.io">IOException @@ -3633,7 +3637,7 @@ implements stopMaster -publicvoidstopMaster() +publicvoidstopMaster() throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true; title="class or interface in java.io">IOException Throws: @@ -3647,7 +3651,7 @@ implements stop
[39/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8118541f/devapidocs/org/apache/hadoop/hbase/client/AsyncAdmin.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/client/AsyncAdmin.html b/devapidocs/org/apache/hadoop/hbase/client/AsyncAdmin.html index a9ecb58..dd4661f 100644 --- a/devapidocs/org/apache/hadoop/hbase/client/AsyncAdmin.html +++ b/devapidocs/org/apache/hadoop/hbase/client/AsyncAdmin.html @@ -18,7 +18,7 @@ catch(err) { } //--> -var methods = {"i0":6,"i1":6,"i2":18,"i3":6,"i4":6,"i5":6,"i6":18,"i7":6,"i8":6,"i9":6,"i10":6,"i11":6,"i12":6,"i13":6,"i14":6,"i15":18,"i16":18,"i17":6,"i18":6,"i19":6,"i20":6,"i21":6,"i22":6,"i23":6,"i24":6,"i25":6,"i26":6,"i27":6,"i28":6,"i29":6,"i30":6,"i31":6,"i32":6,"i33":6,"i34":6,"i35":6,"i36":6,"i37":6,"i38":6,"i39":6,"i40":6,"i41":6,"i42":6,"i43":6,"i44":6,"i45":6,"i46":6,"i47":18,"i48":6,"i49":6,"i50":18,"i51":6,"i52":6,"i53":6,"i54":6,"i55":6,"i56":6,"i57":18,"i58":18,"i59":18,"i60":6,"i61":6,"i62":6,"i63":6,"i64":6,"i65":6,"i66":6,"i67":18,"i68":6,"i69":6,"i70":6,"i71":6,"i72":6,"i73":6,"i74":6,"i75":6,"i76":6,"i77":6,"i78":6,"i79":6,"i80":6,"i81":6,"i82":6,"i83":18,"i84":6,"i85":6,"i86":6,"i87":6,"i88":6,"i89":6,"i90":6,"i91":18,"i92":6,"i93":6,"i94":6,"i95":18,"i96":6,"i97":6,"i98":6,"i99":6,"i100":6,"i101":18,"i102":18,"i103":6,"i104":6,"i105":6,"i106":6,"i107":6,"i108":6,"i109":6,"i110":6,"i111":6,"i112":6,"i113":6,"i114":6,"i115":6,"i116":6,"i117":6,"i118":6,"i119" :6,"i120":6,"i121":6,"i122":6,"i123":6,"i124":6,"i125":6,"i126":6,"i127":6,"i128":6,"i129":18,"i130":18,"i131":6,"i132":6,"i133":6,"i134":6,"i135":6,"i136":6,"i137":6,"i138":6,"i139":6,"i140":6,"i141":6,"i142":6,"i143":6}; +var methods = 
{"i0":6,"i1":6,"i2":18,"i3":6,"i4":6,"i5":6,"i6":18,"i7":6,"i8":6,"i9":6,"i10":6,"i11":6,"i12":6,"i13":6,"i14":6,"i15":18,"i16":18,"i17":6,"i18":6,"i19":6,"i20":6,"i21":6,"i22":6,"i23":6,"i24":6,"i25":6,"i26":6,"i27":6,"i28":6,"i29":6,"i30":6,"i31":6,"i32":6,"i33":6,"i34":6,"i35":6,"i36":6,"i37":6,"i38":6,"i39":6,"i40":6,"i41":6,"i42":6,"i43":6,"i44":6,"i45":6,"i46":6,"i47":6,"i48":18,"i49":6,"i50":6,"i51":18,"i52":6,"i53":6,"i54":6,"i55":6,"i56":6,"i57":6,"i58":18,"i59":18,"i60":18,"i61":6,"i62":6,"i63":6,"i64":6,"i65":6,"i66":6,"i67":6,"i68":18,"i69":6,"i70":6,"i71":6,"i72":6,"i73":6,"i74":6,"i75":6,"i76":6,"i77":6,"i78":6,"i79":6,"i80":6,"i81":6,"i82":6,"i83":6,"i84":18,"i85":6,"i86":6,"i87":6,"i88":6,"i89":6,"i90":6,"i91":6,"i92":18,"i93":6,"i94":6,"i95":6,"i96":18,"i97":6,"i98":6,"i99":6,"i100":6,"i101":6,"i102":18,"i103":18,"i104":6,"i105":6,"i106":6,"i107":6,"i108":6,"i109":6,"i110":6,"i111":6,"i112":6,"i113":6,"i114":6,"i115":6,"i116":6,"i117":6,"i118":6,"i119" :6,"i120":6,"i121":6,"i122":6,"i123":6,"i124":6,"i125":6,"i126":6,"i127":6,"i128":6,"i129":6,"i130":18,"i131":18,"i132":6,"i133":6,"i134":6,"i135":6,"i136":6,"i137":6,"i138":6,"i139":6,"i140":6,"i141":6,"i142":6,"i143":6,"i144":6}; var tabs = {65535:["t0","All Methods"],2:["t2","Instance Methods"],4:["t3","Abstract Methods"],16:["t5","Default Methods"]}; var altColor = "altColor"; var rowColor = "rowColor"; @@ -440,168 +440,174 @@ public interface +http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true; title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true; title="class or interface in java.lang">Void +flushRegionServer(ServerNameserverName) +Flush all region on the region server. 
+ + + default http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true; title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/util/Collection.html?is-external=true; title="class or interface in java.util">CollectionServerName getBackupMasters() - + http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true; title="class or interface in java.util.concurrent">CompletableFutureClusterMetrics getClusterMetrics() - + http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true; title="class or interface in java.util.concurrent">CompletableFutureClusterMetrics getClusterMetrics(http://docs.oracle.com/javase/8/docs/api/java/util/EnumSet.html?is-external=true; title="class or interface in java.util">EnumSetClusterMetrics.Optionoptions) - + default http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true; title="class or interface in java.util.concurrent">CompletableFutureCompactionState getCompactionState(TableNametableName) Get the current compaction state of a table. - + http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true; title="class or interface in
hbase-site git commit: INFRA-10751 Empty commit
Repository: hbase-site Updated Branches: refs/heads/asf-site 8118541fa -> 4de7b70f4 INFRA-10751 Empty commit Project: http://git-wip-us.apache.org/repos/asf/hbase-site/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase-site/commit/4de7b70f Tree: http://git-wip-us.apache.org/repos/asf/hbase-site/tree/4de7b70f Diff: http://git-wip-us.apache.org/repos/asf/hbase-site/diff/4de7b70f Branch: refs/heads/asf-site Commit: 4de7b70f43ad1df11012b71f3acad1c48fe90749 Parents: 8118541 Author: jenkinsAuthored: Tue Jan 23 15:31:14 2018 + Committer: jenkins Committed: Tue Jan 23 15:31:14 2018 + -- --
[48/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8118541f/apidocs/org/apache/hadoop/hbase/client/AsyncAdmin.html -- diff --git a/apidocs/org/apache/hadoop/hbase/client/AsyncAdmin.html b/apidocs/org/apache/hadoop/hbase/client/AsyncAdmin.html index 319cf14..f660c5a 100644 --- a/apidocs/org/apache/hadoop/hbase/client/AsyncAdmin.html +++ b/apidocs/org/apache/hadoop/hbase/client/AsyncAdmin.html @@ -18,7 +18,7 @@ catch(err) { } //--> -var methods = {"i0":6,"i1":6,"i2":18,"i3":6,"i4":6,"i5":6,"i6":18,"i7":6,"i8":6,"i9":6,"i10":6,"i11":6,"i12":6,"i13":6,"i14":6,"i15":18,"i16":18,"i17":6,"i18":6,"i19":6,"i20":6,"i21":6,"i22":6,"i23":6,"i24":6,"i25":6,"i26":6,"i27":6,"i28":6,"i29":6,"i30":6,"i31":6,"i32":6,"i33":6,"i34":6,"i35":6,"i36":6,"i37":6,"i38":6,"i39":6,"i40":6,"i41":6,"i42":6,"i43":6,"i44":6,"i45":6,"i46":6,"i47":18,"i48":6,"i49":6,"i50":18,"i51":6,"i52":6,"i53":6,"i54":6,"i55":6,"i56":6,"i57":18,"i58":18,"i59":18,"i60":6,"i61":6,"i62":6,"i63":6,"i64":6,"i65":6,"i66":6,"i67":18,"i68":6,"i69":6,"i70":6,"i71":6,"i72":6,"i73":6,"i74":6,"i75":6,"i76":6,"i77":6,"i78":6,"i79":6,"i80":6,"i81":6,"i82":6,"i83":18,"i84":6,"i85":6,"i86":6,"i87":6,"i88":6,"i89":6,"i90":6,"i91":18,"i92":6,"i93":6,"i94":6,"i95":18,"i96":6,"i97":6,"i98":6,"i99":6,"i100":6,"i101":18,"i102":18,"i103":6,"i104":6,"i105":6,"i106":6,"i107":6,"i108":6,"i109":6,"i110":6,"i111":6,"i112":6,"i113":6,"i114":6,"i115":6,"i116":6,"i117":6,"i118":6,"i119" :6,"i120":6,"i121":6,"i122":6,"i123":6,"i124":6,"i125":6,"i126":6,"i127":6,"i128":6,"i129":18,"i130":18,"i131":6,"i132":6,"i133":6,"i134":6,"i135":6,"i136":6,"i137":6,"i138":6,"i139":6,"i140":6,"i141":6,"i142":6,"i143":6}; +var methods = 
{"i0":6,"i1":6,"i2":18,"i3":6,"i4":6,"i5":6,"i6":18,"i7":6,"i8":6,"i9":6,"i10":6,"i11":6,"i12":6,"i13":6,"i14":6,"i15":18,"i16":18,"i17":6,"i18":6,"i19":6,"i20":6,"i21":6,"i22":6,"i23":6,"i24":6,"i25":6,"i26":6,"i27":6,"i28":6,"i29":6,"i30":6,"i31":6,"i32":6,"i33":6,"i34":6,"i35":6,"i36":6,"i37":6,"i38":6,"i39":6,"i40":6,"i41":6,"i42":6,"i43":6,"i44":6,"i45":6,"i46":6,"i47":6,"i48":18,"i49":6,"i50":6,"i51":18,"i52":6,"i53":6,"i54":6,"i55":6,"i56":6,"i57":6,"i58":18,"i59":18,"i60":18,"i61":6,"i62":6,"i63":6,"i64":6,"i65":6,"i66":6,"i67":6,"i68":18,"i69":6,"i70":6,"i71":6,"i72":6,"i73":6,"i74":6,"i75":6,"i76":6,"i77":6,"i78":6,"i79":6,"i80":6,"i81":6,"i82":6,"i83":6,"i84":18,"i85":6,"i86":6,"i87":6,"i88":6,"i89":6,"i90":6,"i91":6,"i92":18,"i93":6,"i94":6,"i95":6,"i96":18,"i97":6,"i98":6,"i99":6,"i100":6,"i101":6,"i102":18,"i103":18,"i104":6,"i105":6,"i106":6,"i107":6,"i108":6,"i109":6,"i110":6,"i111":6,"i112":6,"i113":6,"i114":6,"i115":6,"i116":6,"i117":6,"i118":6,"i119" :6,"i120":6,"i121":6,"i122":6,"i123":6,"i124":6,"i125":6,"i126":6,"i127":6,"i128":6,"i129":6,"i130":18,"i131":18,"i132":6,"i133":6,"i134":6,"i135":6,"i136":6,"i137":6,"i138":6,"i139":6,"i140":6,"i141":6,"i142":6,"i143":6,"i144":6}; var tabs = {65535:["t0","All Methods"],2:["t2","Instance Methods"],4:["t3","Abstract Methods"],16:["t5","Default Methods"]}; var altColor = "altColor"; var rowColor = "rowColor"; @@ -436,168 +436,174 @@ public interface +http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true; title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true; title="class or interface in java.lang">Void +flushRegionServer(ServerNameserverName) +Flush all region on the region server. 
+ + + default http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true; title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/util/Collection.html?is-external=true; title="class or interface in java.util">CollectionServerName getBackupMasters() - + http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true; title="class or interface in java.util.concurrent">CompletableFutureClusterMetrics getClusterMetrics() - + http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true; title="class or interface in java.util.concurrent">CompletableFutureClusterMetrics getClusterMetrics(http://docs.oracle.com/javase/8/docs/api/java/util/EnumSet.html?is-external=true; title="class or interface in java.util">EnumSetorg.apache.hadoop.hbase.ClusterMetrics.Optionoptions) - + default http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true; title="class or interface in java.util.concurrent">CompletableFutureCompactionState getCompactionState(TableNametableName) Get the current compaction state of a table. - + http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true; title="class or interface in
[51/51] [partial] hbase-site git commit: Published site at .
Published site at . Project: http://git-wip-us.apache.org/repos/asf/hbase-site/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase-site/commit/8118541f Tree: http://git-wip-us.apache.org/repos/asf/hbase-site/tree/8118541f Diff: http://git-wip-us.apache.org/repos/asf/hbase-site/diff/8118541f Branch: refs/heads/asf-site Commit: 8118541fadc18440bef7e381b22742249d5812cc Parents: 08b2580 Author: jenkinsAuthored: Tue Jan 23 15:30:24 2018 + Committer: jenkins Committed: Tue Jan 23 15:30:24 2018 + -- acid-semantics.html | 4 +- apache_hbase_reference_guide.pdf| 6 +- apidocs/index-all.html | 8 + .../hadoop/hbase/class-use/ServerName.html |12 + .../org/apache/hadoop/hbase/client/Admin.html | 647 +- .../apache/hadoop/hbase/client/AsyncAdmin.html | 434 +- .../hadoop/hbase/snapshot/SnapshotInfo.html |18 +- .../org/apache/hadoop/hbase/client/Admin.html | 3643 +-- .../apache/hadoop/hbase/client/AsyncAdmin.html | 1728 +- .../hadoop/hbase/client/locking/EntityLock.html | 101 +- .../hadoop/hbase/snapshot/SnapshotInfo.html | 808 +- book.html | 2 +- bulk-loads.html | 4 +- checkstyle-aggregate.html | 28594 - checkstyle.rss | 160 +- coc.html| 4 +- cygwin.html | 4 +- dependencies.html | 4 +- dependency-convergence.html | 4 +- dependency-info.html| 4 +- dependency-management.html | 4 +- devapidocs/constant-values.html | 8 +- devapidocs/index-all.html |34 +- .../hadoop/hbase/backup/package-tree.html | 2 +- .../hadoop/hbase/class-use/ServerName.html |29 + .../org/apache/hadoop/hbase/client/Admin.html | 647 +- .../apache/hadoop/hbase/client/AsyncAdmin.html | 434 +- .../hadoop/hbase/client/AsyncHBaseAdmin.html| 387 +- ...ectionImplementation.MasterServiceState.html |18 +- ...onImplementation.MasterServiceStubMaker.html |10 +- ...ntation.ServerErrorTracker.ServerErrors.html |10 +- ...ectionImplementation.ServerErrorTracker.html |20 +- .../hbase/client/ConnectionImplementation.html |82 +- .../HBaseAdmin.MergeTableRegionsFuture.html | 8 +- .../client/HBaseAdmin.ModifyTableFuture.html|10 +- 
.../client/HBaseAdmin.NamespaceFuture.html |12 +- ...in.ProcedureFuture.WaitForStateCallable.html | 8 +- .../client/HBaseAdmin.ProcedureFuture.html |48 +- .../client/HBaseAdmin.ReplicationFuture.html|10 +- .../HBaseAdmin.RestoreSnapshotFuture.html | 8 +- .../HBaseAdmin.SplitTableRegionFuture.html | 8 +- ...n.TableFuture.TableWaitForStateCallable.html | 8 +- .../hbase/client/HBaseAdmin.TableFuture.html|30 +- .../client/HBaseAdmin.ThrowableAbortable.html | 8 +- .../apache/hadoop/hbase/client/HBaseAdmin.html | 664 +- ...dmin.AddColumnFamilyProcedureBiConsumer.html | 6 +- ...dmin.CreateNamespaceProcedureBiConsumer.html | 6 +- ...aseAdmin.CreateTableProcedureBiConsumer.html | 6 +- ...n.DeleteColumnFamilyProcedureBiConsumer.html | 6 +- ...dmin.DeleteNamespaceProcedureBiConsumer.html | 6 +- ...aseAdmin.DeleteTableProcedureBiConsumer.html | 8 +- ...seAdmin.DisableTableProcedureBiConsumer.html | 6 +- ...aseAdmin.EnableTableProcedureBiConsumer.html | 6 +- ...min.MergeTableRegionProcedureBiConsumer.html | 6 +- ...n.ModifyColumnFamilyProcedureBiConsumer.html | 6 +- ...dmin.ModifyNamespaceProcedureBiConsumer.html | 6 +- ...aseAdmin.ModifyTableProcedureBiConsumer.html | 6 +- ...HBaseAdmin.NamespaceProcedureBiConsumer.html |14 +- .../RawAsyncHBaseAdmin.ProcedureBiConsumer.html |10 +- ...aseAdmin.ReplicationProcedureBiConsumer.html |14 +- ...min.SplitTableRegionProcedureBiConsumer.html | 6 +- ...syncHBaseAdmin.TableProcedureBiConsumer.html |14 +- ...eAdmin.TruncateTableProcedureBiConsumer.html | 6 +- .../hadoop/hbase/client/RawAsyncHBaseAdmin.html | 532 +- .../hbase/client/class-use/RegionInfo.html |36 +- .../hbase/client/class-use/TableDescriptor.html |14 +- .../locking/EntityLock.LockHeartbeatWorker.html | 2 +- .../hadoop/hbase/client/package-tree.html |24 +- .../hadoop/hbase/conf/ConfigurationManager.html | 2 +- .../ZkSplitLogWorkerCoordination.html | 2 +- ...ocessorHost.ObserverOperationWithResult.html | 2 +- .../hbase/coprocessor/CoprocessorHost.html | 6 +-
[21/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8118541f/devapidocs/org/apache/hadoop/hbase/master/HMasterCommandLine.LocalHMaster.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/master/HMasterCommandLine.LocalHMaster.html b/devapidocs/org/apache/hadoop/hbase/master/HMasterCommandLine.LocalHMaster.html index 9b9941b..2fb2450 100644 --- a/devapidocs/org/apache/hadoop/hbase/master/HMasterCommandLine.LocalHMaster.html +++ b/devapidocs/org/apache/hadoop/hbase/master/HMasterCommandLine.LocalHMaster.html @@ -132,7 +132,7 @@ var activeTableTab = "activeTableTab"; -public static class HMasterCommandLine.LocalHMaster +public static class HMasterCommandLine.LocalHMaster extends HMaster @@ -318,7 +318,7 @@ extends zkcluster -privateMiniZooKeeperCluster zkcluster +privateMiniZooKeeperCluster zkcluster @@ -335,7 +335,7 @@ extends LocalHMaster -publicLocalHMaster(org.apache.hadoop.conf.Configurationconf) +publicLocalHMaster(org.apache.hadoop.conf.Configurationconf) throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true; title="class or interface in java.io">IOException, org.apache.zookeeper.KeeperException, http://docs.oracle.com/javase/8/docs/api/java/lang/InterruptedException.html?is-external=true; title="class or interface in java.lang">InterruptedException @@ -361,7 +361,7 @@ extends run -publicvoidrun() +publicvoidrun() Description copied from class:HRegionServer The HRegionServer sticks in this loop until closed. 
@@ -378,7 +378,7 @@ extends setZKCluster -voidsetZKCluster(MiniZooKeeperClusterzkcluster) +voidsetZKCluster(MiniZooKeeperClusterzkcluster) http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8118541f/devapidocs/org/apache/hadoop/hbase/master/HMasterCommandLine.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/master/HMasterCommandLine.html b/devapidocs/org/apache/hadoop/hbase/master/HMasterCommandLine.html index 0b918c0..95fdd9a 100644 --- a/devapidocs/org/apache/hadoop/hbase/master/HMasterCommandLine.html +++ b/devapidocs/org/apache/hadoop/hbase/master/HMasterCommandLine.html @@ -338,7 +338,7 @@ extends getUsage -protectedhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">StringgetUsage() +protectedhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">StringgetUsage() Description copied from class:ServerCommandLine Implementing subclasses should return a usage string to print out. 
@@ -353,7 +353,7 @@ extends run -publicintrun(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">String[]args) +publicintrun(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">String[]args) throws http://docs.oracle.com/javase/8/docs/api/java/lang/Exception.html?is-external=true; title="class or interface in java.lang">Exception Throws: @@ -367,7 +367,7 @@ extends startMaster -privateintstartMaster() +privateintstartMaster() @@ -376,7 +376,7 @@ extends stopMaster -privateintstopMaster() +privateintstopMaster() @@ -385,7 +385,7 @@ extends waitOnMasterThreads -privatevoidwaitOnMasterThreads(LocalHBaseClustercluster) +privatevoidwaitOnMasterThreads(LocalHBaseClustercluster) throws http://docs.oracle.com/javase/8/docs/api/java/lang/InterruptedException.html?is-external=true; title="class or interface in java.lang">InterruptedException Throws: @@ -399,7 +399,7 @@ extends closeAllRegionServerThreads -private staticvoidcloseAllRegionServerThreads(http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true; title="class or interface in java.util">ListJVMClusterUtil.RegionServerThreadregionservers) +private staticvoidcloseAllRegionServerThreads(http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true; title="class or interface in java.util">ListJVMClusterUtil.RegionServerThreadregionservers) http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8118541f/devapidocs/org/apache/hadoop/hbase/master/LoadBalancer.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/master/LoadBalancer.html b/devapidocs/org/apache/hadoop/hbase/master/LoadBalancer.html index ab927d5..a145836 100644 --- a/devapidocs/org/apache/hadoop/hbase/master/LoadBalancer.html +++ b/devapidocs/org/apache/hadoop/hbase/master/LoadBalancer.html @@ -536,7 +536,7 @@ extends org.apache.hadoop.conf.Configurable, onConfigurationChange
[29/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8118541f/devapidocs/org/apache/hadoop/hbase/io/hfile/HFileBlock.FSReader.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/io/hfile/HFileBlock.FSReader.html b/devapidocs/org/apache/hadoop/hbase/io/hfile/HFileBlock.FSReader.html index 9c7f010..9df0ee6 100644 --- a/devapidocs/org/apache/hadoop/hbase/io/hfile/HFileBlock.FSReader.html +++ b/devapidocs/org/apache/hadoop/hbase/io/hfile/HFileBlock.FSReader.html @@ -109,7 +109,7 @@ var activeTableTab = "activeTableTab"; -static interface HFileBlock.FSReader +static interface HFileBlock.FSReader An HFile block reader with iteration ability. @@ -199,7 +199,7 @@ var activeTableTab = "activeTableTab"; readBlockData -HFileBlockreadBlockData(longoffset, +HFileBlockreadBlockData(longoffset, longonDiskSize, booleanpread, booleanupdateMetrics) @@ -224,7 +224,7 @@ var activeTableTab = "activeTableTab"; blockRange -HFileBlock.BlockIteratorblockRange(longstartOffset, +HFileBlock.BlockIteratorblockRange(longstartOffset, longendOffset) Creates a block iterator over the given portion of the HFile. The iterator returns blocks starting with offset such that offset = @@ -246,7 +246,7 @@ var activeTableTab = "activeTableTab"; closeStreams -voidcloseStreams() +voidcloseStreams() throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true; title="class or interface in java.io">IOException Closes the backing streams @@ -261,7 +261,7 @@ var activeTableTab = "activeTableTab"; getBlockDecodingContext -HFileBlockDecodingContextgetBlockDecodingContext() +HFileBlockDecodingContextgetBlockDecodingContext() Get a decoder for BlockType.ENCODED_DATA blocks from this file. @@ -271,7 +271,7 @@ var activeTableTab = "activeTableTab"; getDefaultBlockDecodingContext -HFileBlockDecodingContextgetDefaultBlockDecodingContext() +HFileBlockDecodingContextgetDefaultBlockDecodingContext() Get the default decoder for blocks from this file. 
@@ -281,7 +281,7 @@ var activeTableTab = "activeTableTab"; setIncludesMemStoreTS -voidsetIncludesMemStoreTS(booleanincludesMemstoreTS) +voidsetIncludesMemStoreTS(booleanincludesMemstoreTS) @@ -290,7 +290,7 @@ var activeTableTab = "activeTableTab"; setDataBlockEncoder -voidsetDataBlockEncoder(HFileDataBlockEncoderencoder) +voidsetDataBlockEncoder(HFileDataBlockEncoderencoder) @@ -299,7 +299,7 @@ var activeTableTab = "activeTableTab"; unbufferStream -voidunbufferStream() +voidunbufferStream() To close the stream's socket. Note: This can be concurrently called from multiple threads and implementation should take care of thread safety. http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8118541f/devapidocs/org/apache/hadoop/hbase/io/hfile/HFileBlock.FSReaderImpl.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/io/hfile/HFileBlock.FSReaderImpl.html b/devapidocs/org/apache/hadoop/hbase/io/hfile/HFileBlock.FSReaderImpl.html index 82418c0..b19e81d 100644 --- a/devapidocs/org/apache/hadoop/hbase/io/hfile/HFileBlock.FSReaderImpl.html +++ b/devapidocs/org/apache/hadoop/hbase/io/hfile/HFileBlock.FSReaderImpl.html @@ -117,7 +117,7 @@ var activeTableTab = "activeTableTab"; -static class HFileBlock.FSReaderImpl +static class HFileBlock.FSReaderImpl extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true; title="class or interface in java.lang">Object implements HFileBlock.FSReader Reads version 2 HFile blocks from the filesystem. 
@@ -376,7 +376,7 @@ implements streamWrapper -privateFSDataInputStreamWrapper streamWrapper +privateFSDataInputStreamWrapper streamWrapper The file system stream of the underlying HFile that does or doesn't do checksum validations in the filesystem @@ -387,7 +387,7 @@ implements encodedBlockDecodingCtx -privateHFileBlockDecodingContext encodedBlockDecodingCtx +privateHFileBlockDecodingContext encodedBlockDecodingCtx @@ -396,7 +396,7 @@ implements defaultDecodingCtx -private finalHFileBlockDefaultDecodingContext defaultDecodingCtx +private finalHFileBlockDefaultDecodingContext defaultDecodingCtx Default context used when BlockType != BlockType.ENCODED_DATA. @@ -406,7 +406,7 @@ implements prefetchedHeader -privatehttp://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicReference.html?is-external=true; title="class or interface in java.util.concurrent.atomic">AtomicReferenceHFileBlock.PrefetchedHeader prefetchedHeader +privatehttp://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicReference.html?is-external=true; title="class or interface in
[16/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8118541f/devapidocs/org/apache/hadoop/hbase/master/balancer/RegionLocationFinder.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/master/balancer/RegionLocationFinder.html b/devapidocs/org/apache/hadoop/hbase/master/balancer/RegionLocationFinder.html index de0f43c..85b7b98 100644 --- a/devapidocs/org/apache/hadoop/hbase/master/balancer/RegionLocationFinder.html +++ b/devapidocs/org/apache/hadoop/hbase/master/balancer/RegionLocationFinder.html @@ -394,7 +394,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html? cache -privateorg.apache.hbase.thirdparty.com.google.common.cache.LoadingCacheRegionInfo,HDFSBlocksDistribution cache +privateorg.apache.hbase.thirdparty.com.google.common.cache.LoadingCacheRegionInfo,HDFSBlocksDistribution cache @@ -411,7 +411,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html? RegionLocationFinder -RegionLocationFinder() +RegionLocationFinder() @@ -428,7 +428,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html? createCache -privateorg.apache.hbase.thirdparty.com.google.common.cache.LoadingCacheRegionInfo,HDFSBlocksDistributioncreateCache() +privateorg.apache.hbase.thirdparty.com.google.common.cache.LoadingCacheRegionInfo,HDFSBlocksDistributioncreateCache() Create a cache for region to list of servers Returns: @@ -442,7 +442,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html? getConf -publicorg.apache.hadoop.conf.ConfigurationgetConf() +publicorg.apache.hadoop.conf.ConfigurationgetConf() @@ -451,7 +451,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html? setConf -publicvoidsetConf(org.apache.hadoop.conf.Configurationconf) +publicvoidsetConf(org.apache.hadoop.conf.Configurationconf) @@ -460,7 +460,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html? 
setServices -publicvoidsetServices(MasterServicesservices) +publicvoidsetServices(MasterServicesservices) @@ -469,7 +469,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html? setClusterMetrics -publicvoidsetClusterMetrics(ClusterMetricsstatus) +publicvoidsetClusterMetrics(ClusterMetricsstatus) @@ -478,7 +478,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html? scheduleFullRefresh -privatebooleanscheduleFullRefresh() +privatebooleanscheduleFullRefresh() Refresh all the region locations. Returns: @@ -492,7 +492,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html? getTopBlockLocations -protectedhttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true; title="class or interface in java.util">ListServerNamegetTopBlockLocations(RegionInforegion) +protectedhttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true; title="class or interface in java.util">ListServerNamegetTopBlockLocations(RegionInforegion) @@ -501,7 +501,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html? getTopBlockLocations -protectedhttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true; title="class or interface in java.util">ListServerNamegetTopBlockLocations(RegionInforegion, +protectedhttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true; title="class or interface in java.util">ListServerNamegetTopBlockLocations(RegionInforegion, http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">StringcurrentHost) Returns an ordered list of hosts which have better locality for this region than the current host. @@ -513,7 +513,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html? 
internalGetTopBlockLocation -protectedHDFSBlocksDistributioninternalGetTopBlockLocation(RegionInforegion) +protectedHDFSBlocksDistributioninternalGetTopBlockLocation(RegionInforegion) Returns an ordered list of hosts that are hosting the blocks for this region. The weight of each host is the sum of the block lengths of all files on that host, so the first host in the list is the server which holds @@ -532,7 +532,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html? getTableDescriptor -protectedTableDescriptorgetTableDescriptor(TableNametableName) +protectedTableDescriptorgetTableDescriptor(TableNametableName) throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true; title="class or interface in java.io">IOException return TableDescriptor for a given tableName @@ -551,7 +551,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html? mapHostNameToServerName
[50/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8118541f/acid-semantics.html -- diff --git a/acid-semantics.html b/acid-semantics.html index 5aee2e4..d5fe621 100644 --- a/acid-semantics.html +++ b/acid-semantics.html @@ -7,7 +7,7 @@ - + Apache HBase Apache HBase (TM) ACID Properties @@ -606,7 +606,7 @@ under the License. --> https://www.apache.org/;>The Apache Software Foundation. All rights reserved. - Last Published: 2018-01-22 + Last Published: 2018-01-23 http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8118541f/apache_hbase_reference_guide.pdf -- diff --git a/apache_hbase_reference_guide.pdf b/apache_hbase_reference_guide.pdf index f9b1c1d..38f7ba8 100644 --- a/apache_hbase_reference_guide.pdf +++ b/apache_hbase_reference_guide.pdf @@ -5,8 +5,8 @@ /Author (Apache HBase Team) /Creator (Asciidoctor PDF 1.5.0.alpha.15, based on Prawn 2.2.2) /Producer (Apache HBase Team) -/ModDate (D:20180122144849+00'00') -/CreationDate (D:20180122144849+00'00') +/ModDate (D:20180123144836+00'00') +/CreationDate (D:20180123144836+00'00') >> endobj 2 0 obj @@ -28014,7 +28014,7 @@ endobj endobj 136 0 obj << /Limits [(__anchor-top) (adding.new.node)] -/Names [(__anchor-top) 25 0 R (__indexterm-7409344) 3452 0 R (__indexterm-7411594) 3454 0 R (__indexterm-7413656) 3455 0 R (__indexterm-7415530) 3456 0 R (acid) 912 0 R (acl) 3273 0 R (add-metric-name-and-function-to-hadoop-compat-interface) 3551 0 R (add-the-implementation-to-both-hadoop-1-and-hadoop-2-compat-modules) 3553 0 R (add.metrics) 3549 0 R (adding-a-new-chapter-to-the-hbase-reference-guide) 3793 0 R (adding.new.node) 3017 0 R] +/Names [(__anchor-top) 25 0 R (__indexterm-7409342) 3452 0 R (__indexterm-7411592) 3454 0 R (__indexterm-7413654) 3455 0 R (__indexterm-7415528) 3456 0 R (acid) 912 0 R (acl) 3273 0 R (add-metric-name-and-function-to-hadoop-compat-interface) 3551 0 R (add-the-implementation-to-both-hadoop-1-and-hadoop-2-compat-modules) 3553 0 R (add.metrics) 3549 0 R 
(adding-a-new-chapter-to-the-hbase-reference-guide) 3793 0 R (adding.new.node) 3017 0 R] >> endobj 137 0 obj http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8118541f/apidocs/index-all.html -- diff --git a/apidocs/index-all.html b/apidocs/index-all.html index a31b217..ae128c9 100644 --- a/apidocs/index-all.html +++ b/apidocs/index-all.html @@ -5655,6 +5655,14 @@ Flush an individual region. +flushRegionServer(ServerName) - Method in interface org.apache.hadoop.hbase.client.Admin + +Flush all regions on the region server. + +flushRegionServer(ServerName) - Method in interface org.apache.hadoop.hbase.client.AsyncAdmin + +Flush all region on the region server. + flushRegionsIfNecessary(Configuration) - Static method in class org.apache.hadoop.hbase.mapreduce.Import If the durability is set to Durability.SKIP_WAL and the data is imported to hbase, we http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8118541f/apidocs/org/apache/hadoop/hbase/class-use/ServerName.html -- diff --git a/apidocs/org/apache/hadoop/hbase/class-use/ServerName.html b/apidocs/org/apache/hadoop/hbase/class-use/ServerName.html index 0a67f20..6ea4d25 100644 --- a/apidocs/org/apache/hadoop/hbase/class-use/ServerName.html +++ b/apidocs/org/apache/hadoop/hbase/class-use/ServerName.html @@ -538,6 +538,18 @@ +http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true; title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true; title="class or interface in java.lang">Void +AsyncAdmin.flushRegionServer(ServerNameserverName) +Flush all region on the region server. + + + +void +Admin.flushRegionServer(ServerNameserverName) +Flush all regions on the region server. + + + http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true; title="class or interface in java.util">ListHRegionInfo Admin.getOnlineRegions(ServerNamesn) Deprecated.
[27/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8118541f/devapidocs/org/apache/hadoop/hbase/io/hfile/LruBlockCache.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/io/hfile/LruBlockCache.html b/devapidocs/org/apache/hadoop/hbase/io/hfile/LruBlockCache.html index f8c49cb..e3762b3 100644 --- a/devapidocs/org/apache/hadoop/hbase/io/hfile/LruBlockCache.html +++ b/devapidocs/org/apache/hadoop/hbase/io/hfile/LruBlockCache.html @@ -1265,7 +1265,7 @@ implements CACHE_FIXED_OVERHEAD -public static finallong CACHE_FIXED_OVERHEAD +public static finallong CACHE_FIXED_OVERHEAD @@ -1434,7 +1434,7 @@ implements cacheBlock -publicvoidcacheBlock(BlockCacheKeycacheKey, +publicvoidcacheBlock(BlockCacheKeycacheKey, Cacheablebuf) Cache the block with the specified name and buffer. @@ -1453,7 +1453,7 @@ implements updateSizeMetrics -privatelongupdateSizeMetrics(LruCachedBlockcb, +privatelongupdateSizeMetrics(LruCachedBlockcb, booleanevict) Helper function that updates the local size counter and also updates any per-cf or per-blocktype metrics it can discern from given @@ -1466,7 +1466,7 @@ implements getBlock -publicCacheablegetBlock(BlockCacheKeycacheKey, +publicCacheablegetBlock(BlockCacheKeycacheKey, booleancaching, booleanrepeat, booleanupdateCacheMetrics) @@ -1492,7 +1492,7 @@ implements containsBlock -publicbooleancontainsBlock(BlockCacheKeycacheKey) +publicbooleancontainsBlock(BlockCacheKeycacheKey) Whether the cache contains block with specified cacheKey Returns: @@ -1506,7 +1506,7 @@ implements evictBlock -publicbooleanevictBlock(BlockCacheKeycacheKey) +publicbooleanevictBlock(BlockCacheKeycacheKey) Description copied from interface:BlockCache Evict block from cache. 
@@ -1525,7 +1525,7 @@ implements evictBlocksByHfileName -publicintevictBlocksByHfileName(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">StringhfileName) +publicintevictBlocksByHfileName(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">StringhfileName) Evicts all blocks for a specific HFile. This is an expensive operation implemented as a linear-time search through all blocks in the cache. Ideally this should be a search in a log-access-time map. @@ -1546,7 +1546,7 @@ implements evictBlock -protectedlongevictBlock(LruCachedBlockblock, +protectedlongevictBlock(LruCachedBlockblock, booleanevictedByEvictionProcess) Evict the block, and it will be cached by the victim handler if exists block may be read again later @@ -1565,7 +1565,7 @@ implements runEviction -privatevoidrunEviction() +privatevoidrunEviction() Multi-threaded call to run the eviction process. @@ -1575,7 +1575,7 @@ implements isEvictionInProgress -booleanisEvictionInProgress() +booleanisEvictionInProgress() @@ -1584,7 +1584,7 @@ implements getOverhead -longgetOverhead() +longgetOverhead() @@ -1593,7 +1593,7 @@ implements evict -voidevict() +voidevict() Eviction method. 
@@ -1603,7 +1603,7 @@ implements toString -publichttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">StringtoString() +publichttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">StringtoString() Overrides: http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#toString--; title="class or interface in java.lang">toStringin classhttp://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true; title="class or interface in java.lang">Object @@ -1616,7 +1616,7 @@ implements getMaxSize -publiclonggetMaxSize() +publiclonggetMaxSize() Get the maximum size of this cache. Specified by: @@ -1632,7 +1632,7 @@ implements getCurrentSize -publiclonggetCurrentSize() +publiclonggetCurrentSize() Description copied from interface:BlockCache Returns the occupied size of the block cache, in bytes. @@ -1649,7 +1649,7 @@ implements getCurrentDataSize -publiclonggetCurrentDataSize() +publiclonggetCurrentDataSize() Description copied from interface:BlockCache Returns the occupied size of data blocks, in bytes. @@ -1666,7 +1666,7 @@ implements getFreeSize -publiclonggetFreeSize() +publiclonggetFreeSize() Description copied from interface:BlockCache Returns the free size of the block cache, in bytes. @@ -1683,7 +1683,7 @@ implements size -publiclongsize() +publiclongsize() Description copied from interface:BlockCache Returns the total size of the block cache, in bytes. @@ -1700,7 +1700,7 @@ implements getBlockCount
[20/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8118541f/devapidocs/org/apache/hadoop/hbase/master/MasterCoprocessorHost.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/master/MasterCoprocessorHost.html b/devapidocs/org/apache/hadoop/hbase/master/MasterCoprocessorHost.html index e3f0e57..61789bb 100644 --- a/devapidocs/org/apache/hadoop/hbase/master/MasterCoprocessorHost.html +++ b/devapidocs/org/apache/hadoop/hbase/master/MasterCoprocessorHost.html @@ -965,7 +965,7 @@ extends masterServices -privateMasterServices masterServices +privateMasterServices masterServices @@ -974,7 +974,7 @@ extends masterObserverGetter -privateCoprocessorHost.ObserverGetterMasterCoprocessor,MasterObserver masterObserverGetter +privateCoprocessorHost.ObserverGetterMasterCoprocessor,MasterObserver masterObserverGetter @@ -991,7 +991,7 @@ extends MasterCoprocessorHost -publicMasterCoprocessorHost(MasterServicesservices, +publicMasterCoprocessorHost(MasterServicesservices, org.apache.hadoop.conf.Configurationconf) @@ -1009,7 +1009,7 @@ extends createEnvironment -publicMasterCoprocessorHost.MasterEnvironmentcreateEnvironment(MasterCoprocessorinstance, +publicMasterCoprocessorHost.MasterEnvironmentcreateEnvironment(MasterCoprocessorinstance, intpriority, intseq, org.apache.hadoop.conf.Configurationconf) @@ -1027,7 +1027,7 @@ extends checkAndGetInstance -publicMasterCoprocessorcheckAndGetInstance(http://docs.oracle.com/javase/8/docs/api/java/lang/Class.html?is-external=true; title="class or interface in java.lang">Class?implClass) +publicMasterCoprocessorcheckAndGetInstance(http://docs.oracle.com/javase/8/docs/api/java/lang/Class.html?is-external=true; title="class or interface in java.lang">Class?implClass) throws http://docs.oracle.com/javase/8/docs/api/java/lang/InstantiationException.html?is-external=true; title="class or interface in java.lang">InstantiationException, http://docs.oracle.com/javase/8/docs/api/java/lang/IllegalAccessException.html?is-external=true; 
title="class or interface in java.lang">IllegalAccessException Description copied from class:CoprocessorHost @@ -1051,7 +1051,7 @@ extends preCreateNamespace -publicvoidpreCreateNamespace(NamespaceDescriptorns) +publicvoidpreCreateNamespace(NamespaceDescriptorns) throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true; title="class or interface in java.io">IOException Throws: @@ -1065,7 +1065,7 @@ extends postCreateNamespace -publicvoidpostCreateNamespace(NamespaceDescriptorns) +publicvoidpostCreateNamespace(NamespaceDescriptorns) throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true; title="class or interface in java.io">IOException Throws: @@ -1079,7 +1079,7 @@ extends preDeleteNamespace -publicvoidpreDeleteNamespace(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">StringnamespaceName) +publicvoidpreDeleteNamespace(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">StringnamespaceName) throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true; title="class or interface in java.io">IOException Throws: @@ -1093,7 +1093,7 @@ extends postDeleteNamespace -publicvoidpostDeleteNamespace(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">StringnamespaceName) +publicvoidpostDeleteNamespace(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">StringnamespaceName) throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true; title="class or interface in java.io">IOException Throws: @@ -1107,7 +1107,7 @@ extends preModifyNamespace -publicvoidpreModifyNamespace(NamespaceDescriptorns) +publicvoidpreModifyNamespace(NamespaceDescriptorns) throws 
http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true; title="class or interface in java.io">IOException Throws: @@ -1121,7 +1121,7 @@ extends postModifyNamespace -publicvoidpostModifyNamespace(NamespaceDescriptorns) +publicvoidpostModifyNamespace(NamespaceDescriptorns) throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true; title="class or
[45/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8118541f/apidocs/src-html/org/apache/hadoop/hbase/client/AsyncAdmin.html -- diff --git a/apidocs/src-html/org/apache/hadoop/hbase/client/AsyncAdmin.html b/apidocs/src-html/org/apache/hadoop/hbase/client/AsyncAdmin.html index f331c7a..5c18b82 100644 --- a/apidocs/src-html/org/apache/hadoop/hbase/client/AsyncAdmin.html +++ b/apidocs/src-html/org/apache/hadoop/hbase/client/AsyncAdmin.html @@ -303,936 +303,942 @@ 295 CompletableFutureVoid flushRegion(byte[] regionName); 296 297 /** -298 * Compact a table. When the returned CompletableFuture is done, it only means the compact request -299 * was sent to HBase and may need some time to finish the compact operation. -300 * @param tableName table to compact -301 */ -302 default CompletableFutureVoid compact(TableName tableName) { -303return compact(tableName, CompactType.NORMAL); -304 } -305 -306 /** -307 * Compact a column family within a table. When the returned CompletableFuture is done, it only -308 * means the compact request was sent to HBase and may need some time to finish the compact -309 * operation. -310 * @param tableName table to compact -311 * @param columnFamily column family within a table. If not present, compact the table's all -312 * column families. -313 */ -314 default CompletableFutureVoid compact(TableName tableName, byte[] columnFamily) { -315return compact(tableName, columnFamily, CompactType.NORMAL); -316 } -317 -318 /** -319 * Compact a table. When the returned CompletableFuture is done, it only means the compact request -320 * was sent to HBase and may need some time to finish the compact operation. -321 * @param tableName table to compact -322 * @param compactType {@link org.apache.hadoop.hbase.client.CompactType} -323 */ -324 CompletableFutureVoid compact(TableName tableName, CompactType compactType); -325 -326 /** -327 * Compact a column family within a table. 
When the returned CompletableFuture is done, it only -328 * means the compact request was sent to HBase and may need some time to finish the compact -329 * operation. -330 * @param tableName table to compact -331 * @param columnFamily column family within a table -332 * @param compactType {@link org.apache.hadoop.hbase.client.CompactType} -333 */ -334 CompletableFutureVoid compact(TableName tableName, byte[] columnFamily, -335 CompactType compactType); -336 -337 /** -338 * Compact an individual region. When the returned CompletableFuture is done, it only means the -339 * compact request was sent to HBase and may need some time to finish the compact operation. -340 * @param regionName region to compact -341 */ -342 CompletableFutureVoid compactRegion(byte[] regionName); -343 -344 /** -345 * Compact a column family within a region. When the returned CompletableFuture is done, it only -346 * means the compact request was sent to HBase and may need some time to finish the compact -347 * operation. -348 * @param regionName region to compact -349 * @param columnFamily column family within a region. If not present, compact the region's all -350 * column families. -351 */ -352 CompletableFutureVoid compactRegion(byte[] regionName, byte[] columnFamily); -353 -354 /** -355 * Major compact a table. When the returned CompletableFuture is done, it only means the compact -356 * request was sent to HBase and may need some time to finish the compact operation. -357 * @param tableName table to major compact -358 */ -359 default CompletableFutureVoid majorCompact(TableName tableName) { -360return majorCompact(tableName, CompactType.NORMAL); -361 } -362 -363 /** -364 * Major compact a column family within a table. When the returned CompletableFuture is done, it -365 * only means the compact request was sent to HBase and may need some time to finish the compact -366 * operation. -367 * @param tableName table to major compact -368 * @param columnFamily column family within a table. 
If not present, major compact the table's all -369 * column families. -370 */ -371 default CompletableFutureVoid majorCompact(TableName tableName, byte[] columnFamily) { -372return majorCompact(tableName, columnFamily, CompactType.NORMAL); -373 } -374 -375 /** -376 * Major compact a table. When the returned CompletableFuture is done, it only means the compact -377 * request was sent to HBase and may need some time to finish the compact operation. -378 * @param tableName table to major compact -379 * @param compactType {@link org.apache.hadoop.hbase.client.CompactType} -380 */ -381 CompletableFutureVoid majorCompact(TableName tableName, CompactType compactType); -382 -383 /** -384 * Major compact a column family within a table. When the returned CompletableFuture is done, it -385
[38/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8118541f/devapidocs/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.html b/devapidocs/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.html index 45e4434..5639266 100644 --- a/devapidocs/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.html +++ b/devapidocs/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.html @@ -18,7 +18,7 @@ catch(err) { } //--> -var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10,"i78":10,"i79":10,"i80":10,"i81":10,"i82":10,"i83":10,"i84":10,"i85":10,"i86":10,"i87":10,"i88":10,"i89":10,"i90":10,"i91":10,"i92":10,"i93":10,"i94":10,"i95":10,"i96":10,"i97":10,"i98":10,"i99":10,"i100":10,"i101":10,"i102":10,"i103":10,"i104":10,"i105":10,"i106":10,"i107":10,"i108":10,"i 109":10,"i110":10,"i111":10,"i112":10,"i113":10,"i114":10,"i115":10,"i116":10,"i117":10,"i118":10,"i119":10,"i120":10,"i121":10,"i122":10,"i123":10,"i124":10,"i125":10,"i126":10,"i127":10,"i128":10}; +var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10,"i78":10,"i79":10,"i80":10,"i81":10,"i82":10,"i83":10,"i84":10,"i85":10,"i86":10,"i87":10,"i88":10,"i89":10,"i90":10,"i91":10,"i92":10,"i93":10,"i94":10,"i95":10,"i96":10,"i97":10,"i98":10,"i99":10,"i100":10,"i101":10,"i102":10,"i103":10,"i104":10,"i105":10,"i106":10,"i107":10,"i108":10,"i 109":10,"i110":10,"i111":10,"i112":10,"i113":10,"i114":10,"i115":10,"i116":10,"i117":10,"i118":10,"i119":10,"i120":10,"i121":10,"i122":10,"i123":10,"i124":10,"i125":10,"i126":10,"i127":10,"i128":10,"i129":10}; var tabs = {65535:["t0","All Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]}; var altColor = "altColor"; var rowColor = "rowColor"; @@ -472,140 +472,146 @@ implements +http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true; title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true; title="class or interface in java.lang">Void +flushRegionServer(ServerNamesn) +Flush all region on the region server. 
+ + + http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true; title="class or interface in java.util.concurrent">CompletableFutureClusterMetrics getClusterMetrics() - + http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true; title="class or interface in java.util.concurrent">CompletableFutureClusterMetrics getClusterMetrics(http://docs.oracle.com/javase/8/docs/api/java/util/EnumSet.html?is-external=true; title="class or interface in java.util">EnumSetClusterMetrics.Optionoptions) - + http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true; title="class or interface in java.util.concurrent">CompletableFutureCompactionState getCompactionState(TableNametableName, CompactTypecompactType) Get the current compaction state of a table. - + http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true; title="class or interface in java.util.concurrent">CompletableFutureCompactionState getCompactionStateForRegion(byte[]regionName) Get the current compaction state of region. - + http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true; title="class or interface in java.util.concurrent">CompletableFutureTableDescriptor getDescriptor(TableNametableName) Method for getting the
[25/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8118541f/devapidocs/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.html b/devapidocs/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.html index 1eccab2..10c78e1 100644 --- a/devapidocs/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.html +++ b/devapidocs/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.html @@ -114,7 +114,7 @@ var activeTableTab = "activeTableTab"; @InterfaceAudience.Private -public class BucketCache +public class BucketCache extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true; title="class or interface in java.lang">Object implements BlockCache, HeapSize BucketCache uses BucketAllocator to allocate/free blocks, and uses @@ -781,7 +781,7 @@ implements LOG -private static finalorg.slf4j.Logger LOG +private static finalorg.slf4j.Logger LOG @@ -790,7 +790,7 @@ implements SINGLE_FACTOR_CONFIG_NAME -static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">String SINGLE_FACTOR_CONFIG_NAME +static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">String SINGLE_FACTOR_CONFIG_NAME Priority buckets config See Also: @@ -804,7 +804,7 @@ implements MULTI_FACTOR_CONFIG_NAME -static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">String MULTI_FACTOR_CONFIG_NAME +static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">String MULTI_FACTOR_CONFIG_NAME See Also: Constant Field Values @@ -817,7 +817,7 @@ implements MEMORY_FACTOR_CONFIG_NAME -static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">String 
MEMORY_FACTOR_CONFIG_NAME +static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">String MEMORY_FACTOR_CONFIG_NAME See Also: Constant Field Values @@ -830,7 +830,7 @@ implements EXTRA_FREE_FACTOR_CONFIG_NAME -static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">String EXTRA_FREE_FACTOR_CONFIG_NAME +static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">String EXTRA_FREE_FACTOR_CONFIG_NAME See Also: Constant Field Values @@ -843,7 +843,7 @@ implements ACCEPT_FACTOR_CONFIG_NAME -static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">String ACCEPT_FACTOR_CONFIG_NAME +static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">String ACCEPT_FACTOR_CONFIG_NAME See Also: Constant Field Values @@ -856,7 +856,7 @@ implements MIN_FACTOR_CONFIG_NAME -static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">String MIN_FACTOR_CONFIG_NAME +static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">String MIN_FACTOR_CONFIG_NAME See Also: Constant Field Values @@ -869,7 +869,7 @@ implements DEFAULT_SINGLE_FACTOR -static finalfloat DEFAULT_SINGLE_FACTOR +static finalfloat DEFAULT_SINGLE_FACTOR Priority buckets See Also: @@ -883,7 +883,7 @@ implements DEFAULT_MULTI_FACTOR -static finalfloat DEFAULT_MULTI_FACTOR +static finalfloat DEFAULT_MULTI_FACTOR See Also: Constant Field Values @@ -896,7 +896,7 @@ implements DEFAULT_MEMORY_FACTOR -static finalfloat DEFAULT_MEMORY_FACTOR +static finalfloat DEFAULT_MEMORY_FACTOR See Also: Constant Field Values @@ -909,7 
+909,7 @@ implements DEFAULT_MIN_FACTOR -static finalfloat DEFAULT_MIN_FACTOR +static finalfloat DEFAULT_MIN_FACTOR See Also: Constant Field Values @@ -922,7 +922,7 @@ implements DEFAULT_EXTRA_FREE_FACTOR -private static finalfloat DEFAULT_EXTRA_FREE_FACTOR +private static finalfloat DEFAULT_EXTRA_FREE_FACTOR See Also: Constant Field Values @@ -935,7 +935,7 @@ implements DEFAULT_ACCEPT_FACTOR -private static finalfloat DEFAULT_ACCEPT_FACTOR +private static finalfloat DEFAULT_ACCEPT_FACTOR See Also: Constant Field Values @@ -948,7 +948,7 @@ implements DEFAULT_FREE_ENTIRE_BLOCK_FACTOR -private static finalint DEFAULT_FREE_ENTIRE_BLOCK_FACTOR +private static finalint DEFAULT_FREE_ENTIRE_BLOCK_FACTOR See Also: Constant Field Values @@ -961,7 +961,7 @@ implements
[28/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8118541f/devapidocs/org/apache/hadoop/hbase/io/hfile/HFileBlock.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/io/hfile/HFileBlock.html b/devapidocs/org/apache/hadoop/hbase/io/hfile/HFileBlock.html index 792ce9b..2304927 100644 --- a/devapidocs/org/apache/hadoop/hbase/io/hfile/HFileBlock.html +++ b/devapidocs/org/apache/hadoop/hbase/io/hfile/HFileBlock.html @@ -990,7 +990,7 @@ implements DESERIALIZER_IDENTIFIER -private static finalint DESERIALIZER_IDENTIFIER +private static finalint DESERIALIZER_IDENTIFIER @@ -1007,7 +1007,7 @@ implements HFileBlock -privateHFileBlock(HFileBlockthat) +privateHFileBlock(HFileBlockthat) Copy constructor. Creates a shallow copy of that's buffer. @@ -1017,7 +1017,7 @@ implements HFileBlock -privateHFileBlock(HFileBlockthat, +privateHFileBlock(HFileBlockthat, booleanbufCopy) Copy constructor. Creates a shallow/deep copy of that's buffer as per the boolean param. @@ -1029,7 +1029,7 @@ implements HFileBlock -HFileBlock(BlockTypeblockType, +HFileBlock(BlockTypeblockType, intonDiskSizeWithoutHeader, intuncompressedSizeWithoutHeader, longprevBlockOffset, @@ -1067,7 +1067,7 @@ implements HFileBlock -HFileBlock(ByteBuffbuf, +HFileBlock(ByteBuffbuf, booleanusesHBaseChecksum, Cacheable.MemoryTypememType, longoffset, @@ -1100,7 +1100,7 @@ implements init -privatevoidinit(BlockTypeblockType, +privatevoidinit(BlockTypeblockType, intonDiskSizeWithoutHeader, intuncompressedSizeWithoutHeader, longprevBlockOffset, @@ -1117,7 +1117,7 @@ implements getOnDiskSizeWithHeader -private staticintgetOnDiskSizeWithHeader(http://docs.oracle.com/javase/8/docs/api/java/nio/ByteBuffer.html?is-external=true; title="class or interface in java.nio">ByteBufferheaderBuf, +private staticintgetOnDiskSizeWithHeader(http://docs.oracle.com/javase/8/docs/api/java/nio/ByteBuffer.html?is-external=true; title="class or interface in java.nio">ByteBufferheaderBuf, booleanverifyChecksum) Parse total on disk size 
including header and checksum. @@ -1135,7 +1135,7 @@ implements getNextBlockOnDiskSize -intgetNextBlockOnDiskSize() +intgetNextBlockOnDiskSize() Returns: the on-disk size of the next block (including the header size and any checksums if @@ -1150,7 +1150,7 @@ implements getBlockType -publicBlockTypegetBlockType() +publicBlockTypegetBlockType() Specified by: getBlockTypein interfaceCacheable @@ -1165,7 +1165,7 @@ implements getDataBlockEncodingId -shortgetDataBlockEncodingId() +shortgetDataBlockEncodingId() Returns: get data block encoding id that was used to encode this block @@ -1178,7 +1178,7 @@ implements getOnDiskSizeWithHeader -publicintgetOnDiskSizeWithHeader() +publicintgetOnDiskSizeWithHeader() Returns: the on-disk size of header + data part + checksum. @@ -1191,7 +1191,7 @@ implements getOnDiskSizeWithoutHeader -intgetOnDiskSizeWithoutHeader() +intgetOnDiskSizeWithoutHeader() Returns: the on-disk size of the data part + checksum (header excluded). @@ -1204,7 +1204,7 @@ implements getUncompressedSizeWithoutHeader -intgetUncompressedSizeWithoutHeader() +intgetUncompressedSizeWithoutHeader() Returns: the uncompressed size of data part (header and checksum excluded). @@ -1217,7 +1217,7 @@ implements getPrevBlockOffset -longgetPrevBlockOffset() +longgetPrevBlockOffset() Returns: the offset of the previous block of the same type in the file, or @@ -1231,7 +1231,7 @@ implements overwriteHeader -privatevoidoverwriteHeader() +privatevoidoverwriteHeader() Rewinds buf and writes first 4 header fields. buf position is modified as side-effect. @@ -1242,7 +1242,7 @@ implements getBufferWithoutHeader -publicByteBuffgetBufferWithoutHeader() +publicByteBuffgetBufferWithoutHeader() Returns a buffer that does not include the header or checksum. Returns: @@ -1256,7 +1256,7 @@ implements getBufferReadOnly -publicByteBuffgetBufferReadOnly() +publicByteBuffgetBufferReadOnly() Returns a read-only duplicate of the buffer this block stores internally ready to be read. 
Clients must not modify the buffer object though they may set position and limit on the returned buffer since we pass back a duplicate. This method has to be public because it is used @@ -1275,7 +1275,7 @@ implements sanityCheckAssertion -privatevoidsanityCheckAssertion(longvalueFromBuf, +privatevoidsanityCheckAssertion(longvalueFromBuf, longvalueFromField, http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">StringfieldName) throws
[26/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8118541f/devapidocs/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.BucketEntryGroup.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.BucketEntryGroup.html b/devapidocs/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.BucketEntryGroup.html index 9baa74f..c6877be 100644 --- a/devapidocs/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.BucketEntryGroup.html +++ b/devapidocs/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.BucketEntryGroup.html @@ -18,7 +18,7 @@ catch(err) { } //--> -var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10}; +var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10}; var tabs = {65535:["t0","All Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]}; var altColor = "altColor"; var rowColor = "rowColor"; @@ -117,7 +117,7 @@ var activeTableTab = "activeTableTab"; -private class BucketCache.BucketEntryGroup +private class BucketCache.BucketEntryGroup extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true; title="class or interface in java.lang">Object implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true; title="class or interface in java.lang">ComparableBucketCache.BucketEntryGroup Used to group bucket entries into priority buckets. 
There will be a @@ -198,17 +198,21 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl boolean -equals(http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true; title="class or interface in java.lang">Objectthat) +equals(http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true; title="class or interface in java.lang">Objecto) long free(longtoFree) +int +hashCode() + + long overflow() - + long totalSize() @@ -218,7 +222,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl Methods inherited from classjava.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true; title="class or interface in java.lang">Object -http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#clone--; title="class or interface in java.lang">clone, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#finalize--; title="class or interface in java.lang">finalize, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#getClass--; title="class or interface in java.lang">getClass, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#hashCode--; title="class or interface in java.lang">hashCode, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#notify--; title="class or interface in java.lang">notify, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#notifyAll--; title="class or interface in java.lang">notifyAll, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.ht ml?is-external=true#toString--" title="class or interface in java.lang">toString, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#wait--; title="class or interface in java.lang">wait, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#wait-long-; title="class or interface in java.lang">wait, 
http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#wait-long-int-; title="class or interface in java.lang">wait +http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#clone--; title="class or interface in java.lang">clone, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#finalize--; title="class or interface in java.lang">finalize, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#getClass--; title="class or interface in java.lang">getClass, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#notify--; title="class or interface in java.lang">notify, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#notifyAll--; title="class or interface in java.lang">notifyAll, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#toString--; title="class or interface in java.lang">toString, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.ht ml?is-external=true#wait--" title="class or interface in java.lang">wait, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#wait-long-; title="class or interface in java.lang">wait, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#wait-long-int-; title="class or interface in java.lang">wait @@ -240,7 +244,7 @@ implements
[18/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8118541f/devapidocs/org/apache/hadoop/hbase/master/assignment/RegionStates.RegionStateNode.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/master/assignment/RegionStates.RegionStateNode.html b/devapidocs/org/apache/hadoop/hbase/master/assignment/RegionStates.RegionStateNode.html index c448105..4f6b995 100644 --- a/devapidocs/org/apache/hadoop/hbase/master/assignment/RegionStates.RegionStateNode.html +++ b/devapidocs/org/apache/hadoop/hbase/master/assignment/RegionStates.RegionStateNode.html @@ -117,7 +117,7 @@ var activeTableTab = "activeTableTab"; -public static class RegionStates.RegionStateNode +public static class RegionStates.RegionStateNode extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true; title="class or interface in java.lang">Object implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true; title="class or interface in java.lang">ComparableRegionStates.RegionStateNode Current Region State. @@ -360,7 +360,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl regionInfo -private finalRegionInfo regionInfo +private finalRegionInfo regionInfo @@ -369,7 +369,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl event -private finalProcedureEvent? event +private finalProcedureEvent? 
event @@ -378,7 +378,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl procedure -private volatileRegionTransitionProcedure procedure +private volatileRegionTransitionProcedure procedure @@ -387,7 +387,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl regionLocation -private volatileServerName regionLocation +private volatileServerName regionLocation @@ -396,7 +396,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl lastHost -private volatileServerName lastHost +private volatileServerName lastHost @@ -405,7 +405,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl state -private volatileRegionState.State state +private volatileRegionState.State state A Region-in-Transition (RIT) moves through states. See RegionState.State for complete list. A Region that is opened moves from OFFLINE => OPENING => OPENED. @@ -417,7 +417,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl lastUpdate -private volatilelong lastUpdate +private volatilelong lastUpdate Updated whenever a call to setRegionLocation(ServerName) or #setState(State, State...). @@ -428,7 +428,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl openSeqNum -private volatilelong openSeqNum +private volatilelong openSeqNum @@ -445,7 +445,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl RegionStateNode -publicRegionStateNode(RegionInforegionInfo) +publicRegionStateNode(RegionInforegionInfo) @@ -462,7 +462,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl setState -publicbooleansetState(RegionState.Stateupdate, +publicbooleansetState(RegionState.Stateupdate, RegionState.State...expected) Parameters: @@ -479,7 +479,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl offline -publicServerNameoffline() +publicServerNameoffline() Put region into OFFLINE mode (set state and clear location). 
Returns: @@ -493,7 +493,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl transitionState -publicvoidtransitionState(RegionState.Stateupdate, +publicvoidtransitionState(RegionState.Stateupdate, RegionState.State...expected) throws UnexpectedStateException Set new RegionState.State but only if currently in expected State @@ -510,7 +510,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl isInState -publicbooleanisInState(RegionState.State...expected) +publicbooleanisInState(RegionState.State...expected) @@ -519,7 +519,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl isStuck -publicbooleanisStuck() +publicbooleanisStuck() @@ -528,7 +528,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl isInTransition -publicbooleanisInTransition() +publicbooleanisInTransition() @@ -537,7 +537,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl getLastUpdate -publiclonggetLastUpdate() +publiclonggetLastUpdate() @@ -546,7 +546,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl setLastHost -publicvoidsetLastHost(ServerNameserverName) +publicvoidsetLastHost(ServerNameserverName) @@ -555,7 +555,7 @@ implements
[15/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8118541f/devapidocs/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.OpenRegionRemoteCall.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.OpenRegionRemoteCall.html b/devapidocs/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.OpenRegionRemoteCall.html index 733dac1..4cc62fd 100644 --- a/devapidocs/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.OpenRegionRemoteCall.html +++ b/devapidocs/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.OpenRegionRemoteCall.html @@ -122,7 +122,7 @@ var activeTableTab = "activeTableTab"; -private final class RSProcedureDispatcher.OpenRegionRemoteCall +private final class RSProcedureDispatcher.OpenRegionRemoteCall extends RSProcedureDispatcher.AbstractRSRemoteCall Compatibility class used by RSProcedureDispatcher.CompatRemoteProcedureResolver to open regions using old AdminService#openRegion(RpcController, OpenRegionRequest, RpcCallback) rpc. 
@@ -230,7 +230,7 @@ extends operations -private finalhttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true; title="class or interface in java.util">ListRSProcedureDispatcher.RegionOpenOperation operations +private finalhttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true; title="class or interface in java.util">ListRSProcedureDispatcher.RegionOpenOperation operations @@ -247,7 +247,7 @@ extends OpenRegionRemoteCall -publicOpenRegionRemoteCall(ServerNameserverName, +publicOpenRegionRemoteCall(ServerNameserverName, http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true; title="class or interface in java.util">ListRSProcedureDispatcher.RegionOpenOperationoperations) @@ -265,7 +265,7 @@ extends call -publichttp://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true; title="class or interface in java.lang">Voidcall() +publichttp://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true; title="class or interface in java.lang">Voidcall() Specified by: http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/Callable.html?is-external=true#call--; title="class or interface in java.util.concurrent">callin interfacehttp://docs.oracle.com/javase/8/docs/api/java/util/concurrent/Callable.html?is-external=true; title="class or interface in java.util.concurrent">Callablehttp://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true; title="class or interface in java.lang">Void @@ -280,7 +280,7 @@ extends sendRequest -privateorg.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionResponsesendRequest(ServerNameserverName, +privateorg.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionResponsesendRequest(ServerNameserverName, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionRequestrequest) throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true; title="class or interface 
in java.io">IOException @@ -295,7 +295,7 @@ extends remoteCallFailed -privatevoidremoteCallFailed(MasterProcedureEnvenv, +privatevoidremoteCallFailed(MasterProcedureEnvenv, http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true; title="class or interface in java.io">IOExceptione) http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8118541f/devapidocs/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.RegionCloseOperation.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.RegionCloseOperation.html b/devapidocs/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.RegionCloseOperation.html index 88c6085..695b8f8 100644 --- a/devapidocs/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.RegionCloseOperation.html +++ b/devapidocs/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.RegionCloseOperation.html @@ -123,7 +123,7 @@ var activeTableTab = "activeTableTab"; -public static class RSProcedureDispatcher.RegionCloseOperation +public static class RSProcedureDispatcher.RegionCloseOperation extends RSProcedureDispatcher.RegionOperation @@ -243,7 +243,7 @@ extends destinationServer -private finalServerName destinationServer +private finalServerName destinationServer @@ -252,7 +252,7 @@ extends closed -privateboolean closed +privateboolean closed @@ -269,7 +269,7 @@ extends RegionCloseOperation
[46/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8118541f/apidocs/src-html/org/apache/hadoop/hbase/client/Admin.html -- diff --git a/apidocs/src-html/org/apache/hadoop/hbase/client/Admin.html b/apidocs/src-html/org/apache/hadoop/hbase/client/Admin.html index 1771444..131c832 100644 --- a/apidocs/src-html/org/apache/hadoop/hbase/client/Admin.html +++ b/apidocs/src-html/org/apache/hadoop/hbase/client/Admin.html @@ -834,1892 +834,1899 @@ 826 void flushRegion(byte[] regionName) throws IOException; 827 828 /** -829 * Compact a table. Asynchronous operation in that this method requests that a -830 * Compaction run and then it returns. It does not wait on the completion of Compaction -831 * (it can take a while). -832 * -833 * @param tableName table to compact -834 * @throws IOException if a remote or network exception occurs -835 */ -836 void compact(TableName tableName) throws IOException; -837 -838 /** -839 * Compact an individual region. Asynchronous operation in that this method requests that a -840 * Compaction run and then it returns. It does not wait on the completion of Compaction -841 * (it can take a while). -842 * -843 * @param regionName region to compact -844 * @throws IOException if a remote or network exception occurs -845 */ -846 void compactRegion(byte[] regionName) throws IOException; -847 -848 /** -849 * Compact a column family within a table. Asynchronous operation in that this method requests -850 * that a Compaction run and then it returns. It does not wait on the completion of Compaction -851 * (it can take a while). -852 * -853 * @param tableName table to compact -854 * @param columnFamily column family within a table -855 * @throws IOException if a remote or network exception occurs -856 */ -857 void compact(TableName tableName, byte[] columnFamily) -858throws IOException; -859 -860 /** -861 * Compact a column family within a region. Asynchronous operation in that this method requests -862 * that a Compaction run and then it returns. 
It does not wait on the completion of Compaction -863 * (it can take a while). -864 * -865 * @param regionName region to compact -866 * @param columnFamily column family within a region -867 * @throws IOException if a remote or network exception occurs -868 */ -869 void compactRegion(byte[] regionName, byte[] columnFamily) -870throws IOException; -871 -872 /** -873 * Compact a table. Asynchronous operation in that this method requests that a -874 * Compaction run and then it returns. It does not wait on the completion of Compaction -875 * (it can take a while). -876 * -877 * @param tableName table to compact -878 * @param compactType {@link org.apache.hadoop.hbase.client.CompactType} -879 * @throws IOException if a remote or network exception occurs -880 * @throws InterruptedException -881 */ -882 void compact(TableName tableName, CompactType compactType) -883throws IOException, InterruptedException; -884 -885 /** -886 * Compact a column family within a table. Asynchronous operation in that this method -887 * requests that a Compaction run and then it returns. It does not wait on the -888 * completion of Compaction (it can take a while). -889 * -890 * @param tableName table to compact -891 * @param columnFamily column family within a table -892 * @param compactType {@link org.apache.hadoop.hbase.client.CompactType} -893 * @throws IOException if not a mob column family or if a remote or network exception occurs -894 * @throws InterruptedException -895 */ -896 void compact(TableName tableName, byte[] columnFamily, CompactType compactType) -897throws IOException, InterruptedException; -898 -899 /** -900 * Major compact a table. Asynchronous operation in that this method requests -901 * that a Compaction run and then it returns. It does not wait on the completion of Compaction -902 * (it can take a while). 
-903 * -904 * @param tableName table to major compact -905 * @throws IOException if a remote or network exception occurs -906 */ -907 void majorCompact(TableName tableName) throws IOException; -908 -909 /** -910 * Major compact a table or an individual region. Asynchronous operation in that this method requests -911 * that a Compaction run and then it returns. It does not wait on the completion of Compaction -912 * (it can take a while). -913 * -914 * @param regionName region to major compact -915 * @throws IOException if a remote or network exception occurs -916 */ -917 void majorCompactRegion(byte[] regionName) throws IOException; -918 -919 /** -920 * Major compact a column family within a table. Asynchronous operation in that this method requests -921 * that a Compaction run and then it returns. It does not wait on the completion of Compaction -922 * (it can take a
[30/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8118541f/devapidocs/org/apache/hadoop/hbase/filter/FilterWrapper.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/filter/FilterWrapper.html b/devapidocs/org/apache/hadoop/hbase/filter/FilterWrapper.html index 6bc8595..d7d7ff5 100644 --- a/devapidocs/org/apache/hadoop/hbase/filter/FilterWrapper.html +++ b/devapidocs/org/apache/hadoop/hbase/filter/FilterWrapper.html @@ -612,7 +612,7 @@ public filterCell -publicFilter.ReturnCodefilterCell(Cellc) +publicFilter.ReturnCodefilterCell(Cellc) throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true; title="class or interface in java.io">IOException Description copied from class:Filter A way to filter based on the column family, column qualifier and/or the column value. Return @@ -649,7 +649,7 @@ public transformCell -publicCelltransformCell(Cellv) +publicCelltransformCell(Cellv) throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true; title="class or interface in java.io">IOException Description copied from class:Filter Give the filter a chance to transform the passed KeyValue. If the Cell is changed a new @@ -680,7 +680,7 @@ public hasFilterRow -publicbooleanhasFilterRow() +publicbooleanhasFilterRow() Description copied from class:Filter Primarily used to check for conflicts with scans(such as scans that do not read a full row at a time). 
@@ -698,7 +698,7 @@ public filterRowCells -publicvoidfilterRowCells(http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true; title="class or interface in java.util">ListCellkvs) +publicvoidfilterRowCells(http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true; title="class or interface in java.util">ListCellkvs) throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true; title="class or interface in java.io">IOException Description copied from class:Filter Chance to alter the list of Cells to be submitted. Modifications to the list will carry on @@ -721,7 +721,7 @@ public filterRowCellsWithRet -publicFilterWrapper.FilterRowRetCodefilterRowCellsWithRet(http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true; title="class or interface in java.util">ListCellkvs) +publicFilterWrapper.FilterRowRetCodefilterRowCellsWithRet(http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true; title="class or interface in java.util">ListCellkvs) throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true; title="class or interface in java.io">IOException Throws: @@ -735,7 +735,7 @@ public isFamilyEssential -publicbooleanisFamilyEssential(byte[]name) +publicbooleanisFamilyEssential(byte[]name) throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true; title="class or interface in java.io">IOException Description copied from class:Filter Check that given column family is essential for filter to check row. 
Most filters always return @@ -759,7 +759,7 @@ public areSerializedFieldsEqual -booleanareSerializedFieldsEqual(Filtero) +booleanareSerializedFieldsEqual(Filtero) Description copied from class:Filter Concrete implementers can signal a failure condition in their code by throwing an http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true; title="class or interface in java.io">IOException. http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8118541f/devapidocs/org/apache/hadoop/hbase/filter/package-tree.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/filter/package-tree.html b/devapidocs/org/apache/hadoop/hbase/filter/package-tree.html index 1887530..509b010 100644 --- a/devapidocs/org/apache/hadoop/hbase/filter/package-tree.html +++ b/devapidocs/org/apache/hadoop/hbase/filter/package-tree.html @@ -183,13 +183,13 @@ java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true; title="class or interface in java.lang">EnumE (implements java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true; title="class or interface in java.lang">ComparableT, java.io.http://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true; title="class or interface in java.io">Serializable) org.apache.hadoop.hbase.filter.Filter.ReturnCode -org.apache.hadoop.hbase.filter.FuzzyRowFilter.SatisfiesCode +org.apache.hadoop.hbase.filter.BitComparator.BitwiseOp org.apache.hadoop.hbase.filter.FilterList.Operator +org.apache.hadoop.hbase.filter.FuzzyRowFilter.SatisfiesCode
[05/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8118541f/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServerCommandLine.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServerCommandLine.html b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServerCommandLine.html index 4a166a4..7a49825 100644 --- a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServerCommandLine.html +++ b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServerCommandLine.html @@ -309,7 +309,7 @@ extends getUsage -protectedhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">StringgetUsage() +protectedhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">StringgetUsage() Description copied from class:ServerCommandLine Implementing subclasses should return a usage string to print out. @@ -324,7 +324,7 @@ extends start -privateintstart() +privateintstart() throws http://docs.oracle.com/javase/8/docs/api/java/lang/Exception.html?is-external=true; title="class or interface in java.lang">Exception Throws: @@ -338,7 +338,7 @@ extends run -publicintrun(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">String[]args) +publicintrun(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">String[]args) throws http://docs.oracle.com/javase/8/docs/api/java/lang/Exception.html?is-external=true; title="class or interface in java.lang">Exception Throws: http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8118541f/devapidocs/org/apache/hadoop/hbase/regionserver/HStore.StoreFlusherImpl.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/HStore.StoreFlusherImpl.html b/devapidocs/org/apache/hadoop/hbase/regionserver/HStore.StoreFlusherImpl.html index db4f709..44dae4e 
100644 --- a/devapidocs/org/apache/hadoop/hbase/regionserver/HStore.StoreFlusherImpl.html +++ b/devapidocs/org/apache/hadoop/hbase/regionserver/HStore.StoreFlusherImpl.html @@ -117,7 +117,7 @@ var activeTableTab = "activeTableTab"; -private final class HStore.StoreFlusherImpl +private final class HStore.StoreFlusherImpl extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true; title="class or interface in java.lang">Object implements StoreFlushContext @@ -279,7 +279,7 @@ implements tracker -private finalFlushLifeCycleTracker tracker +private finalFlushLifeCycleTracker tracker @@ -288,7 +288,7 @@ implements cacheFlushSeqNum -private finallong cacheFlushSeqNum +private finallong cacheFlushSeqNum @@ -297,7 +297,7 @@ implements snapshot -privateMemStoreSnapshot snapshot +privateMemStoreSnapshot snapshot @@ -306,7 +306,7 @@ implements tempFiles -privatehttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true; title="class or interface in java.util">Listorg.apache.hadoop.fs.Path tempFiles +privatehttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true; title="class or interface in java.util">Listorg.apache.hadoop.fs.Path tempFiles @@ -315,7 +315,7 @@ implements committedFiles -privatehttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true; title="class or interface in java.util">Listorg.apache.hadoop.fs.Path committedFiles +privatehttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true; title="class or interface in java.util">Listorg.apache.hadoop.fs.Path committedFiles @@ -324,7 +324,7 @@ implements cacheFlushCount -privatelong cacheFlushCount +privatelong cacheFlushCount @@ -333,7 +333,7 @@ implements cacheFlushSize -privatelong cacheFlushSize +privatelong cacheFlushSize @@ -342,7 +342,7 @@ implements outputFileSize -privatelong outputFileSize +privatelong outputFileSize @@ -359,7 +359,7 @@ implements StoreFlusherImpl 
-privateStoreFlusherImpl(longcacheFlushSeqNum, +privateStoreFlusherImpl(longcacheFlushSeqNum, FlushLifeCycleTrackertracker) @@ -377,7 +377,7 @@ implements prepare -publicMemStoreSizeprepare() +publicMemStoreSizeprepare() This is not thread safe. The caller should have a lock on the region or the store. If necessary, the lock can be added with the patch provided in HBASE-10087 @@ -394,7 +394,7 @@ implements flushCache -publicvoidflushCache(MonitoredTaskstatus) +publicvoidflushCache(MonitoredTaskstatus) throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true; title="class or interface in java.io">IOException Description copied from
[13/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8118541f/devapidocs/org/apache/hadoop/hbase/quotas/QuotaObserverChore.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/quotas/QuotaObserverChore.html b/devapidocs/org/apache/hadoop/hbase/quotas/QuotaObserverChore.html index 0e12e96..e0e432d 100644 --- a/devapidocs/org/apache/hadoop/hbase/quotas/QuotaObserverChore.html +++ b/devapidocs/org/apache/hadoop/hbase/quotas/QuotaObserverChore.html @@ -872,7 +872,7 @@ extends pruneOldRegionReports -voidpruneOldRegionReports() +voidpruneOldRegionReports() Removes region reports over a certain age. @@ -882,7 +882,7 @@ extends fetchAllTablesWithQuotasDefined -QuotaObserverChore.TablesWithQuotasfetchAllTablesWithQuotasDefined() +QuotaObserverChore.TablesWithQuotasfetchAllTablesWithQuotasDefined() throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true; title="class or interface in java.io">IOException Computes the set of all tables that have quotas defined. 
This includes tables with quotas explicitly set on them, in addition to tables that exist namespaces which have a quota @@ -899,7 +899,7 @@ extends getTableSnapshotStore -QuotaSnapshotStoreTableNamegetTableSnapshotStore() +QuotaSnapshotStoreTableNamegetTableSnapshotStore() @@ -908,7 +908,7 @@ extends getNamespaceSnapshotStore -QuotaSnapshotStorehttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">StringgetNamespaceSnapshotStore() +QuotaSnapshotStorehttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">StringgetNamespaceSnapshotStore() @@ -917,7 +917,7 @@ extends getTableQuotaSnapshots -publichttp://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true; title="class or interface in java.util">MapTableName,SpaceQuotaSnapshotgetTableQuotaSnapshots() +publichttp://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true; title="class or interface in java.util">MapTableName,SpaceQuotaSnapshotgetTableQuotaSnapshots() Returns an unmodifiable view over the current SpaceQuotaSnapshot objects for each HBase table with a quota defined. @@ -928,7 +928,7 @@ extends getNamespaceQuotaSnapshots -publichttp://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true; title="class or interface in java.util">Maphttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">String,SpaceQuotaSnapshotgetNamespaceQuotaSnapshots() +publichttp://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true; title="class or interface in java.util">Maphttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">String,SpaceQuotaSnapshotgetNamespaceQuotaSnapshots() Returns an unmodifiable view over the current SpaceQuotaSnapshot objects for each HBase namespace with a quota defined. 
@@ -939,7 +939,7 @@ extends getTableQuotaSnapshot -SpaceQuotaSnapshotgetTableQuotaSnapshot(TableNametable) +SpaceQuotaSnapshotgetTableQuotaSnapshot(TableNametable) Fetches the SpaceQuotaSnapshot for the given table. @@ -949,7 +949,7 @@ extends setTableQuotaSnapshot -voidsetTableQuotaSnapshot(TableNametable, +voidsetTableQuotaSnapshot(TableNametable, SpaceQuotaSnapshotsnapshot) Stores the quota state for the given table. @@ -960,7 +960,7 @@ extends getNamespaceQuotaSnapshot -SpaceQuotaSnapshotgetNamespaceQuotaSnapshot(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">Stringnamespace) +SpaceQuotaSnapshotgetNamespaceQuotaSnapshot(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">Stringnamespace) Fetches the SpaceQuotaSnapshot for the given namespace from this chore. @@ -970,7 +970,7 @@ extends setNamespaceQuotaSnapshot -voidsetNamespaceQuotaSnapshot(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">Stringnamespace, +voidsetNamespaceQuotaSnapshot(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">Stringnamespace, SpaceQuotaSnapshotsnapshot) Stores the given snapshot for the given namespace in this chore. @@ -981,7 +981,7 @@ extends getPeriod -staticintgetPeriod(org.apache.hadoop.conf.Configurationconf) +staticintgetPeriod(org.apache.hadoop.conf.Configurationconf) Extracts the period for the chore from the configuration. Parameters: @@ -999,7 +999,7 @@ extends getInitialDelay
[03/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8118541f/devapidocs/org/apache/hadoop/hbase/regionserver/InternalScanner.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/InternalScanner.html b/devapidocs/org/apache/hadoop/hbase/regionserver/InternalScanner.html index 358b5e2..5788cf7 100644 --- a/devapidocs/org/apache/hadoop/hbase/regionserver/InternalScanner.html +++ b/devapidocs/org/apache/hadoop/hbase/regionserver/InternalScanner.html @@ -227,7 +227,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/io/Closeable.html close -voidclose() +voidclose() throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true; title="class or interface in java.io">IOException Closes the scanner and releases any resources it has allocated http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8118541f/devapidocs/org/apache/hadoop/hbase/regionserver/KeyValueHeap.KVScannerComparator.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/KeyValueHeap.KVScannerComparator.html b/devapidocs/org/apache/hadoop/hbase/regionserver/KeyValueHeap.KVScannerComparator.html index 4132807..5428e2f 100644 --- a/devapidocs/org/apache/hadoop/hbase/regionserver/KeyValueHeap.KVScannerComparator.html +++ b/devapidocs/org/apache/hadoop/hbase/regionserver/KeyValueHeap.KVScannerComparator.html @@ -121,7 +121,7 @@ var activeTableTab = "activeTableTab"; -protected static class KeyValueHeap.KVScannerComparator +protected static class KeyValueHeap.KVScannerComparator extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true; title="class or interface in java.lang">Object implements http://docs.oracle.com/javase/8/docs/api/java/util/Comparator.html?is-external=true; title="class or interface in java.util">ComparatorKeyValueScanner @@ -231,7 +231,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/util/Comparato kvComparator -protectedCellComparator kvComparator +protectedCellComparator 
kvComparator @@ -248,7 +248,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/util/Comparato KVScannerComparator -publicKVScannerComparator(CellComparatorkvComparator) +publicKVScannerComparator(CellComparatorkvComparator) Constructor Parameters: @@ -270,7 +270,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/util/Comparato compare -publicintcompare(KeyValueScannerleft, +publicintcompare(KeyValueScannerleft, KeyValueScannerright) Specified by: @@ -284,7 +284,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/util/Comparato compare -publicintcompare(Cellleft, +publicintcompare(Cellleft, Cellright) Compares two KeyValue @@ -302,7 +302,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/util/Comparato getComparator -publicCellComparatorgetComparator() +publicCellComparatorgetComparator() Returns: KVComparator http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8118541f/devapidocs/org/apache/hadoop/hbase/regionserver/KeyValueHeap.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/KeyValueHeap.html b/devapidocs/org/apache/hadoop/hbase/regionserver/KeyValueHeap.html index 9195a40..67934f0 100644 --- a/devapidocs/org/apache/hadoop/hbase/regionserver/KeyValueHeap.html +++ b/devapidocs/org/apache/hadoop/hbase/regionserver/KeyValueHeap.html @@ -498,7 +498,7 @@ implements peek -publicCellpeek() +publicCellpeek() Description copied from interface:KeyValueScanner Look at the next Cell in this scanner, but do not iterate scanner. NOTICE: The returned cell has not been passed into ScanQueryMatcher. 
So it may not be what the @@ -517,7 +517,7 @@ implements next -publicCellnext() +publicCellnext() throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true; title="class or interface in java.io">IOException Description copied from interface:KeyValueScanner Return the next Cell in this scanner, iterating the scanner @@ -537,7 +537,7 @@ implements next -publicbooleannext(http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true; title="class or interface in java.util">ListCellresult, +publicbooleannext(http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true; title="class or interface in java.util">ListCellresult, ScannerContextscannerContext) throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true; title="class or interface in java.io">IOException Gets the next row of keys from the top-most scanner. @@ -564,7 +564,7 @@ implements close -publicvoidclose() +publicvoidclose() Description copied from
[11/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8118541f/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.BulkLoadListener.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.BulkLoadListener.html b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.BulkLoadListener.html index 64c3ba9..211ff82 100644 --- a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.BulkLoadListener.html +++ b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.BulkLoadListener.html @@ -109,7 +109,7 @@ var activeTableTab = "activeTableTab"; -public static interface HRegion.BulkLoadListener +public static interface HRegion.BulkLoadListener Listener class to enable callers of bulkLoadHFile() to perform any necessary pre/post processing of a given bulkload call @@ -174,7 +174,7 @@ var activeTableTab = "activeTableTab"; prepareBulkLoad -http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">StringprepareBulkLoad(byte[]family, +http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">StringprepareBulkLoad(byte[]family, http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">StringsrcPath, booleancopyFile) throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true; title="class or interface in java.io">IOException @@ -196,7 +196,7 @@ var activeTableTab = "activeTableTab"; doneBulkLoad -voiddoneBulkLoad(byte[]family, +voiddoneBulkLoad(byte[]family, http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">StringsrcPath) throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true; title="class or interface in java.io">IOException Called after a successful HFile load @@ -215,7 +215,7 @@ var activeTableTab = "activeTableTab"; 
failedBulkLoad -voidfailedBulkLoad(byte[]family, +voidfailedBulkLoad(byte[]family, http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">StringsrcPath) throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true; title="class or interface in java.io">IOException Called after a failed HFile load http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8118541f/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.FlushResult.Result.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.FlushResult.Result.html b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.FlushResult.Result.html index ea28270..b12ee45 100644 --- a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.FlushResult.Result.html +++ b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.FlushResult.Result.html @@ -122,7 +122,7 @@ var activeTableTab = "activeTableTab"; -public static enum HRegion.FlushResult.Result +public static enum HRegion.FlushResult.Result extends http://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true; title="class or interface in java.lang">EnumHRegion.FlushResult.Result @@ -216,7 +216,7 @@ the order they are declared. FLUSHED_NO_COMPACTION_NEEDED -public static finalHRegion.FlushResult.Result FLUSHED_NO_COMPACTION_NEEDED +public static finalHRegion.FlushResult.Result FLUSHED_NO_COMPACTION_NEEDED @@ -225,7 +225,7 @@ the order they are declared. FLUSHED_COMPACTION_NEEDED -public static finalHRegion.FlushResult.Result FLUSHED_COMPACTION_NEEDED +public static finalHRegion.FlushResult.Result FLUSHED_COMPACTION_NEEDED @@ -234,7 +234,7 @@ the order they are declared. CANNOT_FLUSH_MEMSTORE_EMPTY -public static finalHRegion.FlushResult.Result CANNOT_FLUSH_MEMSTORE_EMPTY +public static finalHRegion.FlushResult.Result CANNOT_FLUSH_MEMSTORE_EMPTY @@ -243,7 +243,7 @@ the order they are declared. 
CANNOT_FLUSH -public static finalHRegion.FlushResult.Result CANNOT_FLUSH +public static finalHRegion.FlushResult.Result CANNOT_FLUSH @@ -260,7 +260,7 @@ the order they are declared. values -public staticHRegion.FlushResult.Result[]values() +public staticHRegion.FlushResult.Result[]values() Returns an array containing the constants of this enum type, in the order they are declared. This method may be used to iterate over the constants as follows: @@ -280,7 +280,7 @@ for (HRegion.FlushResult.Result c : HRegion.FlushResult.Result.values()) valueOf -public
[07/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8118541f/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.html b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.html index 6e2a4d4..3931484 100644 --- a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.html +++ b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.html @@ -182,7 +182,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html? -private static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">String +(package private) static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">String REGION_TEMP_DIR Temporary subdirectory of the region directory used for compaction output. @@ -708,7 +708,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html? REGION_TEMP_DIR -private static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">String REGION_TEMP_DIR +static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">String REGION_TEMP_DIR Temporary subdirectory of the region directory used for compaction output. 
See Also: http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8118541f/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.CompactionChecker.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.CompactionChecker.html b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.CompactionChecker.html index cd074b5..e5f8df7 100644 --- a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.CompactionChecker.html +++ b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.CompactionChecker.html @@ -122,7 +122,7 @@ var activeTableTab = "activeTableTab"; -private static class HRegionServer.CompactionChecker +private static class HRegionServer.CompactionChecker extends ScheduledChore @@ -233,7 +233,7 @@ extends instance -private finalHRegionServer instance +private finalHRegionServer instance @@ -242,7 +242,7 @@ extends majorCompactPriority -private finalint majorCompactPriority +private finalint majorCompactPriority @@ -251,7 +251,7 @@ extends DEFAULT_PRIORITY -private static finalint DEFAULT_PRIORITY +private static finalint DEFAULT_PRIORITY See Also: Constant Field Values @@ -264,7 +264,7 @@ extends iteration -privatelong iteration +privatelong iteration @@ -281,7 +281,7 @@ extends CompactionChecker -CompactionChecker(HRegionServerh, +CompactionChecker(HRegionServerh, intsleepTime, Stoppablestopper) @@ -300,7 +300,7 @@ extends chore -protectedvoidchore() +protectedvoidchore() Description copied from class:ScheduledChore The task to execute on each scheduled execution of the Chore http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8118541f/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionInfo.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionInfo.html b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionInfo.html index a387479..983e481 100644 --- 
a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionInfo.html +++ b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionInfo.html @@ -113,7 +113,7 @@ var activeTableTab = "activeTableTab"; -private static class HRegionServer.MovedRegionInfo +private static class HRegionServer.MovedRegionInfo extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true; title="class or interface in java.lang">Object @@ -218,7 +218,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html? serverName -private finalServerName serverName +private finalServerName serverName @@ -227,7 +227,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html? seqNum -private finallong seqNum +private finallong seqNum @@ -236,7 +236,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html? ts -private finallong ts +private finallong ts @@ -253,7 +253,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html? MovedRegionInfo -publicMovedRegionInfo(ServerNameserverName, +publicMovedRegionInfo(ServerNameserverName, longcloseSeqNum) @@ -271,7 +271,7 @@
[34/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8118541f/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.AddColumnFamilyProcedureBiConsumer.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.AddColumnFamilyProcedureBiConsumer.html b/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.AddColumnFamilyProcedureBiConsumer.html index a28a337..19f1c32 100644 --- a/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.AddColumnFamilyProcedureBiConsumer.html +++ b/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.AddColumnFamilyProcedureBiConsumer.html @@ -127,7 +127,7 @@ var activeTableTab = "activeTableTab"; -private class RawAsyncHBaseAdmin.AddColumnFamilyProcedureBiConsumer +private class RawAsyncHBaseAdmin.AddColumnFamilyProcedureBiConsumer extends RawAsyncHBaseAdmin.TableProcedureBiConsumer @@ -232,7 +232,7 @@ extends AddColumnFamilyProcedureBiConsumer -AddColumnFamilyProcedureBiConsumer(TableNametableName) +AddColumnFamilyProcedureBiConsumer(TableNametableName) @@ -249,7 +249,7 @@ extends getOperationType -http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">StringgetOperationType() +http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">StringgetOperationType() Specified by: getOperationTypein classRawAsyncHBaseAdmin.TableProcedureBiConsumer http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8118541f/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.CreateNamespaceProcedureBiConsumer.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.CreateNamespaceProcedureBiConsumer.html b/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.CreateNamespaceProcedureBiConsumer.html index 00eacf7..4e87838 100644 --- a/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.CreateNamespaceProcedureBiConsumer.html +++ 
b/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.CreateNamespaceProcedureBiConsumer.html @@ -127,7 +127,7 @@ var activeTableTab = "activeTableTab"; -private class RawAsyncHBaseAdmin.CreateNamespaceProcedureBiConsumer +private class RawAsyncHBaseAdmin.CreateNamespaceProcedureBiConsumer extends RawAsyncHBaseAdmin.NamespaceProcedureBiConsumer @@ -232,7 +232,7 @@ extends CreateNamespaceProcedureBiConsumer -CreateNamespaceProcedureBiConsumer(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">StringnamespaceName) +CreateNamespaceProcedureBiConsumer(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">StringnamespaceName) @@ -249,7 +249,7 @@ extends getOperationType -http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">StringgetOperationType() +http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">StringgetOperationType() Specified by: getOperationTypein classRawAsyncHBaseAdmin.NamespaceProcedureBiConsumer http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8118541f/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.CreateTableProcedureBiConsumer.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.CreateTableProcedureBiConsumer.html b/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.CreateTableProcedureBiConsumer.html index db3812a..c50ae9c 100644 --- a/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.CreateTableProcedureBiConsumer.html +++ b/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.CreateTableProcedureBiConsumer.html @@ -127,7 +127,7 @@ var activeTableTab = "activeTableTab"; -private class RawAsyncHBaseAdmin.CreateTableProcedureBiConsumer +private class RawAsyncHBaseAdmin.CreateTableProcedureBiConsumer 
extends RawAsyncHBaseAdmin.TableProcedureBiConsumer @@ -232,7 +232,7 @@ extends CreateTableProcedureBiConsumer -CreateTableProcedureBiConsumer(TableNametableName) +CreateTableProcedureBiConsumer(TableNametableName) @@ -249,7 +249,7 @@ extends getOperationType -http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">StringgetOperationType() +http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">StringgetOperationType() Specified by: getOperationTypein classRawAsyncHBaseAdmin.TableProcedureBiConsumer
[14/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8118541f/devapidocs/org/apache/hadoop/hbase/package-tree.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/package-tree.html b/devapidocs/org/apache/hadoop/hbase/package-tree.html index 3d7dd40..8ad69a5 100644 --- a/devapidocs/org/apache/hadoop/hbase/package-tree.html +++ b/devapidocs/org/apache/hadoop/hbase/package-tree.html @@ -445,20 +445,20 @@ java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true; title="class or interface in java.lang">EnumE (implements java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true; title="class or interface in java.lang">ComparableT, java.io.http://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true; title="class or interface in java.io">Serializable) -org.apache.hadoop.hbase.ProcedureState -org.apache.hadoop.hbase.KeyValue.Type -org.apache.hadoop.hbase.MemoryCompactionPolicy +org.apache.hadoop.hbase.HealthChecker.HealthCheckerExitStatus org.apache.hadoop.hbase.CellBuilderType -org.apache.hadoop.hbase.CompatibilitySingletonFactory.SingletonStorage +org.apache.hadoop.hbase.CompareOperator org.apache.hadoop.hbase.HConstants.OperationStatusCode -org.apache.hadoop.hbase.HealthChecker.HealthCheckerExitStatus -org.apache.hadoop.hbase.MetaTableAccessor.QueryType -org.apache.hadoop.hbase.KeepDeletedCells org.apache.hadoop.hbase.Cell.Type +org.apache.hadoop.hbase.ProcedureState org.apache.hadoop.hbase.ClusterMetrics.Option -org.apache.hadoop.hbase.CompareOperator +org.apache.hadoop.hbase.KeepDeletedCells +org.apache.hadoop.hbase.MemoryCompactionPolicy +org.apache.hadoop.hbase.KeyValue.Type org.apache.hadoop.hbase.Size.Unit org.apache.hadoop.hbase.Coprocessor.State +org.apache.hadoop.hbase.MetaTableAccessor.QueryType +org.apache.hadoop.hbase.CompatibilitySingletonFactory.SingletonStorage 
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8118541f/devapidocs/org/apache/hadoop/hbase/procedure/Procedure.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/procedure/Procedure.html b/devapidocs/org/apache/hadoop/hbase/procedure/Procedure.html index d8c18c8..2125999 100644 --- a/devapidocs/org/apache/hadoop/hbase/procedure/Procedure.html +++ b/devapidocs/org/apache/hadoop/hbase/procedure/Procedure.html @@ -481,7 +481,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/util/concurren joinBarrierLock -privatehttp://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true; title="class or interface in java.lang">Object joinBarrierLock +private finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true; title="class or interface in java.lang">Object joinBarrierLock lock to prevent nodes from acquiring and then releasing before we can track them http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8118541f/devapidocs/org/apache/hadoop/hbase/procedure/ProcedureManagerHost.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/procedure/ProcedureManagerHost.html b/devapidocs/org/apache/hadoop/hbase/procedure/ProcedureManagerHost.html index f2d8c67..f841737 100644 --- a/devapidocs/org/apache/hadoop/hbase/procedure/ProcedureManagerHost.html +++ b/devapidocs/org/apache/hadoop/hbase/procedure/ProcedureManagerHost.html @@ -338,7 +338,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html? register -publicvoidregister(Eobj) +publicvoidregister(Eobj) @@ -347,7 +347,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html? 
getProcedureManagers -publichttp://docs.oracle.com/javase/8/docs/api/java/util/Set.html?is-external=true; title="class or interface in java.util">SetEgetProcedureManagers() +publichttp://docs.oracle.com/javase/8/docs/api/java/util/Set.html?is-external=true; title="class or interface in java.util">SetEgetProcedureManagers() @@ -356,7 +356,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html? loadProcedures -public abstractvoidloadProcedures(org.apache.hadoop.conf.Configurationconf) +public abstractvoidloadProcedures(org.apache.hadoop.conf.Configurationconf) http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8118541f/devapidocs/org/apache/hadoop/hbase/procedure/Subprocedure.SubprocedureImpl.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/procedure/Subprocedure.SubprocedureImpl.html b/devapidocs/org/apache/hadoop/hbase/procedure/Subprocedure.SubprocedureImpl.html index b2ab836..f18cf00 100644 --- a/devapidocs/org/apache/hadoop/hbase/procedure/Subprocedure.SubprocedureImpl.html +++
[09/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8118541f/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html index 50cc17f..c270b81 100644 --- a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html +++ b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html @@ -113,7 +113,7 @@ var activeTableTab = "activeTableTab"; -static class HRegion.WriteState +static class HRegion.WriteState extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true; title="class or interface in java.lang">Object @@ -239,7 +239,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html? flushing -volatileboolean flushing +volatileboolean flushing @@ -248,7 +248,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html? flushRequested -volatileboolean flushRequested +volatileboolean flushRequested @@ -257,7 +257,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html? compacting -http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicInteger.html?is-external=true; title="class or interface in java.util.concurrent.atomic">AtomicInteger compacting +http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicInteger.html?is-external=true; title="class or interface in java.util.concurrent.atomic">AtomicInteger compacting @@ -266,7 +266,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html? writesEnabled -volatileboolean writesEnabled +volatileboolean writesEnabled @@ -275,7 +275,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html? readOnly -volatileboolean readOnly +volatileboolean readOnly @@ -284,7 +284,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html? 
readsEnabled -volatileboolean readsEnabled +volatileboolean readsEnabled @@ -293,7 +293,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html? HEAP_SIZE -static finallong HEAP_SIZE +static finallong HEAP_SIZE @@ -310,7 +310,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html? WriteState -WriteState() +WriteState() @@ -327,7 +327,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html? setReadOnly -voidsetReadOnly(booleanonOff) +voidsetReadOnly(booleanonOff) Set flags that make this region read-only. Parameters: @@ -341,7 +341,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html? isReadOnly -booleanisReadOnly() +booleanisReadOnly() @@ -350,7 +350,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html? isFlushRequested -booleanisFlushRequested() +booleanisFlushRequested() @@ -359,7 +359,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html? setReadsEnabled -voidsetReadsEnabled(booleanreadsEnabled) +voidsetReadsEnabled(booleanreadsEnabled)
[01/51] [partial] hbase-site git commit: Published site at .
Repository: hbase-site Updated Branches: refs/heads/asf-site 08b2580fb -> 8118541fa http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8118541f/devapidocs/org/apache/hadoop/hbase/regionserver/SteppingSplitPolicy.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/SteppingSplitPolicy.html b/devapidocs/org/apache/hadoop/hbase/regionserver/SteppingSplitPolicy.html index adf50f9..51a1acc 100644 --- a/devapidocs/org/apache/hadoop/hbase/regionserver/SteppingSplitPolicy.html +++ b/devapidocs/org/apache/hadoop/hbase/regionserver/SteppingSplitPolicy.html @@ -269,7 +269,7 @@ extends getSizeToCheck -protectedlonggetSizeToCheck(inttableRegionsCount) +protectedlonggetSizeToCheck(inttableRegionsCount) Overrides: getSizeToCheckin classIncreasingToUpperBoundRegionSplitPolicy
hbase git commit: HBASE-19847 Fix findbugs and error-prone warnings in hbase-thrift (branch-2)
Repository: hbase Updated Branches: refs/heads/branch-2 739b9b4a8 -> 8977aae43 HBASE-19847 Fix findbugs and error-prone warnings in hbase-thrift (branch-2) Signed-off-by: tedyuProject: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/8977aae4 Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/8977aae4 Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/8977aae4 Branch: refs/heads/branch-2 Commit: 8977aae430d4e4a1fab0667452c64767b23adbca Parents: 739b9b4 Author: Peter Somogyi Authored: Tue Jan 23 14:41:54 2018 +0100 Committer: tedyu Committed: Tue Jan 23 06:49:27 2018 -0800 -- .../hadoop/hbase/thrift/IncrementCoalescer.java | 24 +++ .../hbase/thrift/TBoundedThreadPoolServer.java | 2 + .../hadoop/hbase/thrift/ThriftServerRunner.java | 2 +- .../thrift2/ThriftHBaseServiceHandler.java | 2 +- .../hadoop/hbase/thrift/TestCallQueue.java | 3 +- .../hadoop/hbase/thrift/TestThriftServer.java | 34 ++-- .../thrift2/TestThriftHBaseServiceHandler.java | 171 +-- ...TestThriftHBaseServiceHandlerWithLabels.java | 35 ++-- ...stThriftHBaseServiceHandlerWithReadOnly.java | 36 ++-- 9 files changed, 161 insertions(+), 148 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/8977aae4/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/IncrementCoalescer.java -- diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/IncrementCoalescer.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/IncrementCoalescer.java index 60a8b7f..3fb7254 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/IncrementCoalescer.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/IncrementCoalescer.java @@ -142,6 +142,7 @@ public class IncrementCoalescer implements IncrementCoalescerMBean { namePrefix = "ICV-" + poolNumber.getAndIncrement() + "-thread-"; } +@Override public Thread newThread(Runnable r) { Thread t = new Thread(group, r, namePrefix + 
threadNumber.getAndIncrement(), 0); if (!t.isDaemon()) t.setDaemon(true); @@ -322,49 +323,72 @@ public class IncrementCoalescer implements IncrementCoalescerMBean { } // MBean get/set methods + @Override public int getQueueSize() { return pool.getQueue().size(); } + + @Override public int getMaxQueueSize() { return this.maxQueueSize; } + + @Override public void setMaxQueueSize(int newSize) { this.maxQueueSize = newSize; } + @Override public long getPoolCompletedTaskCount() { return pool.getCompletedTaskCount(); } + + @Override public long getPoolTaskCount() { return pool.getTaskCount(); } + + @Override public int getPoolLargestPoolSize() { return pool.getLargestPoolSize(); } + + @Override public int getCorePoolSize() { return pool.getCorePoolSize(); } + + @Override public void setCorePoolSize(int newCoreSize) { pool.setCorePoolSize(newCoreSize); } + + @Override public int getMaxPoolSize() { return pool.getMaximumPoolSize(); } + + @Override public void setMaxPoolSize(int newMaxSize) { pool.setMaximumPoolSize(newMaxSize); } + + @Override public long getFailedIncrements() { return failedIncrements.sum(); } + @Override public long getSuccessfulCoalescings() { return successfulCoalescings.sum(); } + @Override public long getTotalIncrements() { return totalIncrements.sum(); } + @Override public long getCountersMapSize() { return countersMap.size(); } http://git-wip-us.apache.org/repos/asf/hbase/blob/8977aae4/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/TBoundedThreadPoolServer.java -- diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/TBoundedThreadPoolServer.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/TBoundedThreadPoolServer.java index 732e282..df7d9c8 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/TBoundedThreadPoolServer.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/TBoundedThreadPoolServer.java @@ -161,6 +161,7 @@ public class TBoundedThreadPoolServer extends TServer 
{ serverOptions = options; } + @Override public void serve() { try { serverTransport_.listen(); @@ -274,6 +275,7 @@ public class TBoundedThreadPoolServer extends TServer { /** * Loops on processing a client forever */ +@Override public void run() { TProcessor processor = null; TTransport inputTransport = null;
[05/50] [abbrv] hbase git commit: HBASE-19811 Fix findbugs and error-prone warnings in hbase-server (branch-2)
http://git-wip-us.apache.org/repos/asf/hbase/blob/b1269ec5/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithDefaultVisLabelService.java -- diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithDefaultVisLabelService.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithDefaultVisLabelService.java index 189b37f..2adcf9e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithDefaultVisLabelService.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithDefaultVisLabelService.java @@ -82,6 +82,7 @@ public class TestVisibilityLabelsWithDefaultVisLabelService extends TestVisibili public void testAddLabels() throws Throwable { PrivilegedExceptionAction action = new PrivilegedExceptionAction() { + @Override public VisibilityLabelsResponse run() throws Exception { String[] labels = { "L1", SECRET, "L2", "invalid~", "L3" }; VisibilityLabelsResponse response = null; @@ -122,6 +123,7 @@ public class TestVisibilityLabelsWithDefaultVisLabelService extends TestVisibili do { PrivilegedExceptionAction action = new PrivilegedExceptionAction() { +@Override public VisibilityLabelsResponse run() throws Exception { String[] labels = { SECRET, CONFIDENTIAL, PRIVATE, "ABC", "XYZ" }; try (Connection conn = ConnectionFactory.createConnection(conf)) { @@ -170,6 +172,7 @@ public class TestVisibilityLabelsWithDefaultVisLabelService extends TestVisibili public void testListLabels() throws Throwable { PrivilegedExceptionAction action = new PrivilegedExceptionAction() { + @Override public ListLabelsResponse run() throws Exception { ListLabelsResponse response = null; try (Connection conn = ConnectionFactory.createConnection(conf)) { @@ -200,6 +203,7 @@ public class TestVisibilityLabelsWithDefaultVisLabelService extends TestVisibili public void 
testListLabelsWithRegEx() throws Throwable { PrivilegedExceptionAction action = new PrivilegedExceptionAction() { + @Override public ListLabelsResponse run() throws Exception { ListLabelsResponse response = null; try (Connection conn = ConnectionFactory.createConnection(conf)) { http://git-wip-us.apache.org/repos/asf/hbase/blob/b1269ec5/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithDeletes.java -- diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithDeletes.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithDeletes.java index 0a7d918..9d536fe 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithDeletes.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithDeletes.java @@ -17,6 +17,17 @@ */ package org.apache.hadoop.hbase.security.visibility; +import static org.apache.hadoop.hbase.security.visibility.VisibilityConstants.LABELS_TABLE_NAME; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import java.io.IOException; +import java.io.InterruptedIOException; +import java.security.PrivilegedExceptionAction; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellScanner; @@ -42,14 +53,10 @@ import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.SecurityTests; import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.util.DefaultEnvironmentEdge; -import org.apache.hadoop.hbase.util.EnvironmentEdge; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; -import org.apache.hadoop.hbase.util.Threads; import 
org.junit.After; import org.junit.AfterClass; import org.junit.BeforeClass; -import org.junit.Ignore; import org.junit.Rule; import org.junit.Test; import org.junit.experimental.categories.Category; @@ -57,17 +64,6 @@ import org.junit.rules.TestName; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.IOException; -import java.io.InterruptedIOException; -import java.security.PrivilegedExceptionAction; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; - -import static org.apache.hadoop.hbase.security.visibility.VisibilityConstants.LABELS_TABLE_NAME; -import static
[15/50] [abbrv] hbase git commit: HBASE-19564 Procedure id is missing in the response of peer related operations
HBASE-19564 Procedure id is missing in the response of peer related operations Project: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/521065e5 Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/521065e5 Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/521065e5 Branch: refs/heads/HBASE-19397-branch-2 Commit: 521065e544ce1ca696c43dc640138f13eb2b2243 Parents: 4482adc Author: zhangduoAuthored: Wed Dec 20 20:57:37 2017 +0800 Committer: zhangduo Committed: Tue Jan 23 18:18:00 2018 +0800 -- .../hadoop/hbase/master/MasterRpcServices.java | 24 ++-- .../master/replication/ModifyPeerProcedure.java | 4 +--- 2 files changed, 13 insertions(+), 15 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/521065e5/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java -- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java index 8025a51..72bf2d1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java @@ -1886,10 +1886,10 @@ public class MasterRpcServices extends RSRpcServices public AddReplicationPeerResponse addReplicationPeer(RpcController controller, AddReplicationPeerRequest request) throws ServiceException { try { - master.addReplicationPeer(request.getPeerId(), -ReplicationPeerConfigUtil.convert(request.getPeerConfig()), request.getPeerState() -.getState().equals(ReplicationState.State.ENABLED)); - return AddReplicationPeerResponse.newBuilder().build(); + long procId = master.addReplicationPeer(request.getPeerId(), +ReplicationPeerConfigUtil.convert(request.getPeerConfig()), + request.getPeerState().getState().equals(ReplicationState.State.ENABLED)); + return 
AddReplicationPeerResponse.newBuilder().setProcId(procId).build(); } catch (ReplicationException | IOException e) { throw new ServiceException(e); } @@ -1899,8 +1899,8 @@ public class MasterRpcServices extends RSRpcServices public RemoveReplicationPeerResponse removeReplicationPeer(RpcController controller, RemoveReplicationPeerRequest request) throws ServiceException { try { - master.removeReplicationPeer(request.getPeerId()); - return RemoveReplicationPeerResponse.newBuilder().build(); + long procId = master.removeReplicationPeer(request.getPeerId()); + return RemoveReplicationPeerResponse.newBuilder().setProcId(procId).build(); } catch (ReplicationException | IOException e) { throw new ServiceException(e); } @@ -1910,8 +1910,8 @@ public class MasterRpcServices extends RSRpcServices public EnableReplicationPeerResponse enableReplicationPeer(RpcController controller, EnableReplicationPeerRequest request) throws ServiceException { try { - master.enableReplicationPeer(request.getPeerId()); - return EnableReplicationPeerResponse.newBuilder().build(); + long procId = master.enableReplicationPeer(request.getPeerId()); + return EnableReplicationPeerResponse.newBuilder().setProcId(procId).build(); } catch (ReplicationException | IOException e) { throw new ServiceException(e); } @@ -1921,8 +1921,8 @@ public class MasterRpcServices extends RSRpcServices public DisableReplicationPeerResponse disableReplicationPeer(RpcController controller, DisableReplicationPeerRequest request) throws ServiceException { try { - master.disableReplicationPeer(request.getPeerId()); - return DisableReplicationPeerResponse.newBuilder().build(); + long procId = master.disableReplicationPeer(request.getPeerId()); + return DisableReplicationPeerResponse.newBuilder().setProcId(procId).build(); } catch (ReplicationException | IOException e) { throw new ServiceException(e); } @@ -1948,9 +1948,9 @@ public class MasterRpcServices extends RSRpcServices public UpdateReplicationPeerConfigResponse 
updateReplicationPeerConfig(RpcController controller, UpdateReplicationPeerConfigRequest request) throws ServiceException { try { - master.updateReplicationPeerConfig(request.getPeerId(), + long procId = master.updateReplicationPeerConfig(request.getPeerId(), ReplicationPeerConfigUtil.convert(request.getPeerConfig())); - return UpdateReplicationPeerConfigResponse.newBuilder().build(); + return UpdateReplicationPeerConfigResponse.newBuilder().setProcId(procId).build(); } catch
[31/50] [abbrv] hbase git commit: HBASE-19617 Remove ReplicationQueues, use ReplicationQueueStorage directly
http://git-wip-us.apache.org/repos/asf/hbase/blob/b3b92efa/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceInterface.java -- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceInterface.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceInterface.java index b6cf54d..4b9ed74 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceInterface.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceInterface.java @@ -22,7 +22,6 @@ import java.io.IOException; import java.util.List; import java.util.UUID; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -32,9 +31,10 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.replication.ReplicationEndpoint; import org.apache.hadoop.hbase.replication.ReplicationException; import org.apache.hadoop.hbase.replication.ReplicationPeers; -import org.apache.hadoop.hbase.replication.ReplicationQueues; +import org.apache.hadoop.hbase.replication.ReplicationQueueStorage; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.wal.WAL.Entry; +import org.apache.yetus.audience.InterfaceAudience; /** * Interface that defines a replication source @@ -47,15 +47,10 @@ public interface ReplicationSourceInterface { * @param conf the configuration to use * @param fs the file system to use * @param manager the manager to use - * @param replicationQueues - * @param replicationPeers * @param server the server for this region server - * @param peerClusterZnode - * @param clusterId - * @throws IOException */ void init(Configuration conf, FileSystem fs, ReplicationSourceManager manager, - ReplicationQueues replicationQueues, ReplicationPeers 
replicationPeers, Server server, + ReplicationQueueStorage queueStorage, ReplicationPeers replicationPeers, Server server, String peerClusterZnode, UUID clusterId, ReplicationEndpoint replicationEndpoint, WALFileLengthProvider walFileLengthProvider, MetricsSource metrics) throws IOException; http://git-wip-us.apache.org/repos/asf/hbase/blob/b3b92efa/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java -- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java index 2f202fb..bf9cd30 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java @@ -34,19 +34,21 @@ import java.util.TreeSet; import java.util.UUID; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.CopyOnWriteArrayList; +import java.util.concurrent.Future; import java.util.concurrent.LinkedBlockingQueue; import java.util.concurrent.RejectedExecutionException; import java.util.concurrent.ThreadLocalRandom; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicLong; - +import java.util.stream.Collectors; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.Server; +import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableDescriptors; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Connection; @@ -60,7 +62,7 @@ import org.apache.hadoop.hbase.replication.ReplicationPeer; import 
org.apache.hadoop.hbase.replication.ReplicationPeerConfig; import org.apache.hadoop.hbase.replication.ReplicationPeers; import org.apache.hadoop.hbase.replication.ReplicationQueueInfo; -import org.apache.hadoop.hbase.replication.ReplicationQueues; +import org.apache.hadoop.hbase.replication.ReplicationQueueStorage; import org.apache.hadoop.hbase.replication.ReplicationTracker; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Pair; @@ -68,6 +70,7 @@ import org.apache.hadoop.hbase.wal.AbstractFSWALProvider; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; import
[16/50] [abbrv] hbase git commit: HBASE-19524 Master side changes for moving peer modification from zk watcher to procedure
HBASE-19524 Master side changes for moving peer modification from zk watcher to procedure Project: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/e8dbb5dd Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/e8dbb5dd Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/e8dbb5dd Branch: refs/heads/HBASE-19397-branch-2 Commit: e8dbb5ddba8fbaae6065e5bde88b742d22579d9a Parents: 41e57c3 Author: zhangduoAuthored: Mon Dec 18 15:22:36 2017 +0800 Committer: zhangduo Committed: Tue Jan 23 18:18:00 2018 +0800 -- .../procedure2/RemoteProcedureDispatcher.java | 3 +- .../src/main/protobuf/MasterProcedure.proto | 21 - .../src/main/protobuf/RegionServerStatus.proto | 3 +- .../src/main/protobuf/Replication.proto | 5 + .../replication/ReplicationPeersZKImpl.java | 4 +- .../org/apache/hadoop/hbase/master/HMaster.java | 98 .../hadoop/hbase/master/MasterRpcServices.java | 4 +- .../hadoop/hbase/master/MasterServices.java | 26 +++--- .../assignment/RegionTransitionProcedure.java | 13 +-- .../master/procedure/MasterProcedureEnv.java| 5 + .../master/procedure/ProcedurePrepareLatch.java | 2 +- .../master/replication/AddPeerProcedure.java| 97 +++ .../replication/DisablePeerProcedure.java | 70 ++ .../master/replication/EnablePeerProcedure.java | 69 ++ .../master/replication/ModifyPeerProcedure.java | 97 --- .../master/replication/RefreshPeerCallable.java | 67 - .../replication/RefreshPeerProcedure.java | 28 -- .../master/replication/RemovePeerProcedure.java | 69 ++ .../master/replication/ReplicationManager.java | 76 +++ .../replication/UpdatePeerConfigProcedure.java | 92 ++ .../hbase/regionserver/HRegionServer.java | 5 +- .../regionserver/RefreshPeerCallable.java | 70 ++ .../hbase/master/MockNoopMasterServices.java| 23 +++-- .../replication/DummyModifyPeerProcedure.java | 13 ++- 24 files changed, 735 insertions(+), 225 deletions(-) -- 
http://git-wip-us.apache.org/repos/asf/hbase/blob/e8dbb5dd/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java -- diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java index 735e899..e4c57a5 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java @@ -247,9 +247,8 @@ public abstract class RemoteProcedureDispatcher
[32/50] [abbrv] hbase git commit: HBASE-19617 Remove ReplicationQueues, use ReplicationQueueStorage directly
HBASE-19617 Remove ReplicationQueues, use ReplicationQueueStorage directly Project: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/b3b92efa Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/b3b92efa Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/b3b92efa Branch: refs/heads/HBASE-19397-branch-2 Commit: b3b92efaaad9d442c45ae56f0059aeb4e04cc176 Parents: f90fcc9 Author: zhangduoAuthored: Wed Dec 27 22:03:51 2017 +0800 Committer: zhangduo Committed: Tue Jan 23 18:19:04 2018 +0800 -- .../hbase/replication/ReplicationFactory.java | 9 +- .../hbase/replication/ReplicationQueues.java| 160 --- .../replication/ReplicationQueuesArguments.java | 70 --- .../replication/ReplicationQueuesZKImpl.java| 407 - .../hbase/replication/ReplicationTableBase.java | 442 --- .../replication/ReplicationTrackerZKImpl.java | 21 +- .../replication/ZKReplicationQueueStorage.java | 22 + .../replication/TestReplicationStateBasic.java | 131 +++--- .../replication/TestReplicationStateZKImpl.java | 41 +- .../regionserver/DumpReplicationQueues.java | 15 +- .../RecoveredReplicationSource.java | 17 +- .../RecoveredReplicationSourceShipper.java | 22 +- .../replication/regionserver/Replication.java | 41 +- .../regionserver/ReplicationSource.java | 16 +- .../ReplicationSourceInterface.java | 11 +- .../regionserver/ReplicationSourceManager.java | 261 ++- .../regionserver/ReplicationSyncUp.java | 29 +- .../hbase/master/cleaner/TestLogsCleaner.java | 12 +- .../cleaner/TestReplicationHFileCleaner.java| 26 +- .../cleaner/TestReplicationZKNodeCleaner.java | 22 +- .../replication/ReplicationSourceDummy.java | 6 +- .../replication/TestReplicationSyncUpTool.java | 6 +- .../TestReplicationSourceManager.java | 104 ++--- .../TestReplicationSourceManagerZkImpl.java | 58 +-- 24 files changed, 381 insertions(+), 1568 deletions(-) -- 
http://git-wip-us.apache.org/repos/asf/hbase/blob/b3b92efa/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java -- diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java index 6c1c213..5e70e57 100644 --- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java +++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java @@ -17,12 +17,11 @@ */ package org.apache.hadoop.hbase.replication; -import org.apache.commons.lang3.reflect.ConstructorUtils; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.Stoppable; import org.apache.hadoop.hbase.zookeeper.ZKWatcher; +import org.apache.yetus.audience.InterfaceAudience; /** * A factory class for instantiating replication objects that deal with replication state. 
@@ -30,12 +29,6 @@ import org.apache.hadoop.hbase.zookeeper.ZKWatcher; @InterfaceAudience.Private public class ReplicationFactory { - public static ReplicationQueues getReplicationQueues(ReplicationQueuesArguments args) - throws Exception { -return (ReplicationQueues) ConstructorUtils.invokeConstructor(ReplicationQueuesZKImpl.class, - args); - } - public static ReplicationPeers getReplicationPeers(ZKWatcher zk, Configuration conf, Abortable abortable) { return getReplicationPeers(zk, conf, null, abortable); http://git-wip-us.apache.org/repos/asf/hbase/blob/b3b92efa/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueues.java -- diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueues.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueues.java deleted file mode 100644 index 7f440b1..000 --- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueues.java +++ /dev/null @@ -1,160 +0,0 @@ -/* - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy
[39/50] [abbrv] hbase git commit: HBASE-19623 Create replication endpoint asynchronously when adding a replication source
HBASE-19623 Create replication endpoint asynchronously when adding a replication source Project: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/e560998e Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/e560998e Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/e560998e Branch: refs/heads/HBASE-19397-branch-2 Commit: e560998e8b9b3bc5bf0d2c702580583f83fac103 Parents: 16d080c Author: zhangduoAuthored: Tue Jan 2 13:25:58 2018 +0800 Committer: zhangduo Committed: Tue Jan 23 18:19:45 2018 +0800 -- .../hbase/replication/ReplicationPeer.java | 8 ++ .../hbase/replication/ReplicationPeers.java | 18 +-- .../replication/ZKReplicationPeerStorage.java | 7 +- .../replication/TestReplicationStateBasic.java | 20 +--- .../TestZKReplicationPeerStorage.java | 14 +-- .../HBaseInterClusterReplicationEndpoint.java | 17 ++- .../RecoveredReplicationSource.java | 13 +-- .../regionserver/ReplicationSource.java | 110 +++ .../ReplicationSourceInterface.java | 8 +- .../regionserver/ReplicationSourceManager.java | 47 +--- .../client/TestAsyncReplicationAdminApi.java| 2 - .../replication/TestReplicationAdmin.java | 2 - .../replication/ReplicationSourceDummy.java | 7 +- .../replication/TestReplicationSource.java | 27 +++-- .../TestReplicationSourceManager.java | 8 +- 15 files changed, 127 insertions(+), 181 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/e560998e/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java -- diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java index 4846018..2da3cce 100644 --- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java +++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java @@ -54,6 +54,14 @@ public interface 
ReplicationPeer { PeerState getPeerState(); /** + * Test whether the peer is enabled. + * @return {@code true} if enabled, otherwise {@code false}. + */ + default boolean isPeerEnabled() { +return getPeerState() == PeerState.ENABLED; + } + + /** * Get the peer config object * @return the ReplicationPeerConfig for this peer */ http://git-wip-us.apache.org/repos/asf/hbase/blob/e560998e/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java -- diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java index 422801b..45940a5 100644 --- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java +++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.replication; import java.io.IOException; +import java.util.Collections; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; @@ -86,21 +87,6 @@ public class ReplicationPeers { } /** - * Get the peer state for the specified connected remote slave cluster. The value might be read - * from cache, so it is recommended to use {@link #peerStorage } to read storage directly if - * reading the state after enabling or disabling it. - * @param peerId a short that identifies the cluster - * @return true if replication is enabled, false otherwise. - */ - public boolean isPeerEnabled(String peerId) { -ReplicationPeer replicationPeer = this.peerCache.get(peerId); -if (replicationPeer == null) { - throw new IllegalArgumentException("Peer with id= " + peerId + " is not cached"); -} -return replicationPeer.getPeerState() == PeerState.ENABLED; - } - - /** * Returns the ReplicationPeerImpl for the specified cached peer. 
This ReplicationPeer will * continue to track changes to the Peer's state and config. This method returns null if no peer * has been cached with the given peerId. @@ -117,7 +103,7 @@ public class ReplicationPeers { * @return a Set of Strings for peerIds */ public Set getAllPeerIds() { -return peerCache.keySet(); +return Collections.unmodifiableSet(peerCache.keySet()); } public static Configuration getPeerClusterConfiguration(ReplicationPeerConfig peerConfig,
[11/50] [abbrv] hbase git commit: HBASE-19811 Fix findbugs and error-prone warnings in hbase-server (branch-2)
http://git-wip-us.apache.org/repos/asf/hbase/blob/b1269ec5/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.java -- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.java index 65c4d08..ab282d5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.java @@ -117,6 +117,7 @@ public class RSProcedureDispatcher } } + @Override protected void abortPendingOperations(final ServerName serverName, final Set operations) { // TODO: Replace with a ServerNotOnlineException() @@ -126,10 +127,12 @@ public class RSProcedureDispatcher } } + @Override public void serverAdded(final ServerName serverName) { addNode(serverName); } + @Override public void serverRemoved(final ServerName serverName) { removeNode(serverName); } @@ -138,6 +141,7 @@ public class RSProcedureDispatcher * Base remote call */ protected abstract class AbstractRSRemoteCall implements Callable { +@Override public abstract Void call(); private final ServerName serverName; @@ -269,6 +273,7 @@ public class RSProcedureDispatcher this.remoteProcedures = remoteProcedures; } +@Override public Void call() { request = ExecuteProceduresRequest.newBuilder(); if (LOG.isTraceEnabled()) { @@ -290,11 +295,13 @@ public class RSProcedureDispatcher return null; } +@Override public void dispatchOpenRequests(final MasterProcedureEnv env, final List operations) { request.addOpenRegion(buildOpenRegionRequest(env, getServerName(), operations)); } +@Override public void dispatchCloseRequests(final MasterProcedureEnv env, final List operations) { for (RegionCloseOperation op: operations) { @@ -471,11 +478,13 @@ public class RSProcedureDispatcher return null; } +@Override public void 
dispatchOpenRequests(final MasterProcedureEnv env, final List operations) { submitTask(new OpenRegionRemoteCall(serverName, operations)); } +@Override public void dispatchCloseRequests(final MasterProcedureEnv env, final List operations) { for (RegionCloseOperation op: operations) { http://git-wip-us.apache.org/repos/asf/hbase/blob/b1269ec5/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotHFileCleaner.java -- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotHFileCleaner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotHFileCleaner.java index a8475f0..559863e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotHFileCleaner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotHFileCleaner.java @@ -86,6 +86,7 @@ public class SnapshotHFileCleaner extends BaseHFileCleanerDelegate { return false; } + @Override public void setConf(final Configuration conf) { super.setConf(conf); try { @@ -95,6 +96,7 @@ public class SnapshotHFileCleaner extends BaseHFileCleanerDelegate { Path rootDir = FSUtils.getRootDir(conf); cache = new SnapshotFileCache(fs, rootDir, cacheRefreshPeriod, cacheRefreshPeriod, "snapshot-hfile-cleaner-cache-refresher", new SnapshotFileCache.SnapshotFileInspector() { +@Override public Collection filesUnderSnapshot(final Path snapshotDir) throws IOException { return SnapshotReferenceUtil.getHFileNames(conf, fs, snapshotDir); http://git-wip-us.apache.org/repos/asf/hbase/blob/b1269ec5/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/CachedMobFile.java -- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/CachedMobFile.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/CachedMobFile.java index 397570c..7436d9c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/CachedMobFile.java +++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/CachedMobFile.java @@ -54,6 +54,7 @@ public class CachedMobFile extends MobFile implements Comparable this.accessCount = accessCount; } + @Override public int compareTo(CachedMobFile that) { if (this.accessCount == that.accessCount) return 0; return this.accessCount < that.accessCount ? 1 : -1;
[27/50] [abbrv] hbase git commit: HBASE-19630 Add peer cluster key check when add new replication peer
HBASE-19630 Add peer cluster key check when add new replication peer Signed-off-by: zhangduoProject: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/78317db4 Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/78317db4 Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/78317db4 Branch: refs/heads/HBASE-19397-branch-2 Commit: 78317db4ded17bc43181a6172e3aace5e24139de Parents: dcc7793 Author: Guanghao Zhang Authored: Tue Dec 26 21:10:00 2017 +0800 Committer: zhangduo Committed: Tue Jan 23 18:18:00 2018 +0800 -- .../replication/ReplicationPeerManager.java | 54 .../replication/TestReplicationAdmin.java | 22 2 files changed, 54 insertions(+), 22 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/78317db4/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java -- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java index 84abfeb..b78cbce 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hbase.master.replication; +import java.io.IOException; import java.util.ArrayList; import java.util.Collection; import java.util.List; @@ -42,6 +43,7 @@ import org.apache.hadoop.hbase.replication.ReplicationPeerStorage; import org.apache.hadoop.hbase.replication.ReplicationQueueInfo; import org.apache.hadoop.hbase.replication.ReplicationQueueStorage; import org.apache.hadoop.hbase.replication.ReplicationStorageFactory; +import org.apache.hadoop.hbase.zookeeper.ZKConfig; import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.yetus.audience.InterfaceAudience; @@ -216,36 +218,36 @@ public 
final class ReplicationPeerManager { return desc != null ? Optional.of(desc.getPeerConfig()) : Optional.empty(); } - /** - * If replicate_all flag is true, it means all user tables will be replicated to peer cluster. - * Then allow config exclude namespaces or exclude table-cfs which can't be replicated to peer - * cluster. - * - * If replicate_all flag is false, it means all user tables can't be replicated to peer cluster. - * Then allow to config namespaces or table-cfs which will be replicated to peer cluster. - */ - private static void checkPeerConfig(ReplicationPeerConfig peerConfig) - throws DoNotRetryIOException { + private void checkPeerConfig(ReplicationPeerConfig peerConfig) throws DoNotRetryIOException { +checkClusterKey(peerConfig.getClusterKey()); + if (peerConfig.replicateAllUserTables()) { - if ((peerConfig.getNamespaces() != null && !peerConfig.getNamespaces().isEmpty()) || -(peerConfig.getTableCFsMap() != null && !peerConfig.getTableCFsMap().isEmpty())) { -throw new DoNotRetryIOException("Need clean namespaces or table-cfs config firstly " + - "when you want replicate all cluster"); + // If replicate_all flag is true, it means all user tables will be replicated to peer cluster. + // Then allow config exclude namespaces or exclude table-cfs which can't be replicated to peer + // cluster. 
+ if ((peerConfig.getNamespaces() != null && !peerConfig.getNamespaces().isEmpty()) + || (peerConfig.getTableCFsMap() != null && !peerConfig.getTableCFsMap().isEmpty())) { +throw new DoNotRetryIOException("Need clean namespaces or table-cfs config firstly " ++ "when you want replicate all cluster"); } checkNamespacesAndTableCfsConfigConflict(peerConfig.getExcludeNamespaces(), peerConfig.getExcludeTableCFsMap()); } else { - if ((peerConfig.getExcludeNamespaces() != null && -!peerConfig.getExcludeNamespaces().isEmpty()) || -(peerConfig.getExcludeTableCFsMap() != null && - !peerConfig.getExcludeTableCFsMap().isEmpty())) { + // If replicate_all flag is false, it means all user tables can't be replicated to peer + // cluster. Then allow to config namespaces or table-cfs which will be replicated to peer + // cluster. + if ((peerConfig.getExcludeNamespaces() != null + && !peerConfig.getExcludeNamespaces().isEmpty()) + || (peerConfig.getExcludeTableCFsMap() != null + && !peerConfig.getExcludeTableCFsMap().isEmpty())) { throw new DoNotRetryIOException( -"Need
[30/50] [abbrv] hbase git commit: HBASE-19592 Add UTs to test retry on update zk failure
HBASE-19592 Add UTs to test retry on update zk failure Project: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/2417c32c Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/2417c32c Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/2417c32c Branch: refs/heads/HBASE-19397-branch-2 Commit: 2417c32cebd7633c88b44a314dc65b2221759092 Parents: 78317db Author: zhangduoAuthored: Tue Dec 26 20:39:00 2017 +0800 Committer: zhangduo Committed: Tue Jan 23 18:18:00 2018 +0800 -- .../replication/ReplicationPeerManager.java | 5 +- .../TestReplicationProcedureRetry.java | 200 +++ 2 files changed, 202 insertions(+), 3 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/2417c32c/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java -- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java index b78cbce..f4ccce8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java @@ -53,7 +53,7 @@ import org.apache.yetus.audience.InterfaceAudience; * Used to add/remove a replication peer. 
*/ @InterfaceAudience.Private -public final class ReplicationPeerManager { +public class ReplicationPeerManager { private final ReplicationPeerStorage peerStorage; @@ -61,8 +61,7 @@ public final class ReplicationPeerManager { private final ConcurrentMap peers; - private ReplicationPeerManager(ReplicationPeerStorage peerStorage, - ReplicationQueueStorage queueStorage, + ReplicationPeerManager(ReplicationPeerStorage peerStorage, ReplicationQueueStorage queueStorage, ConcurrentMap peers) { this.peerStorage = peerStorage; this.queueStorage = queueStorage; http://git-wip-us.apache.org/repos/asf/hbase/blob/2417c32c/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationProcedureRetry.java -- diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationProcedureRetry.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationProcedureRetry.java new file mode 100644 index 000..ab35b46 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationProcedureRetry.java @@ -0,0 +1,200 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.replication; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyBoolean; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.spy; + +import java.io.IOException; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.master.HMaster; +import org.apache.hadoop.hbase.master.replication.ReplicationPeerManager; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.testclassification.ReplicationTests; +import org.apache.zookeeper.KeeperException; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.mockito.invocation.InvocationOnMock; +
[47/50] [abbrv] hbase git commit: HBASE-19711 TestReplicationAdmin.testConcurrentPeerOperations hangs
HBASE-19711 TestReplicationAdmin.testConcurrentPeerOperations hangs Signed-off-by: zhangduoProject: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/e4687c90 Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/e4687c90 Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/e4687c90 Branch: refs/heads/HBASE-19397-branch-2 Commit: e4687c9055b93e4e5001c54276f39c0f72cced00 Parents: b0e88ed Author: Guanghao Zhang Authored: Fri Jan 5 15:39:06 2018 +0800 Committer: zhangduo Committed: Tue Jan 23 18:20:38 2018 +0800 -- .../procedure/MasterProcedureScheduler.java | 23 1 file changed, 19 insertions(+), 4 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/e4687c90/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java -- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java index 4ecb3b1..0400de4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java @@ -402,7 +402,7 @@ public class MasterProcedureScheduler extends AbstractProcedureScheduler { @Override public void completionCleanup(final Procedure proc) { if (proc instanceof TableProcedureInterface) { - TableProcedureInterface iProcTable = (TableProcedureInterface)proc; + TableProcedureInterface iProcTable = (TableProcedureInterface) proc; boolean tableDeleted; if (proc.hasException()) { Exception procEx = proc.getException().unwrapRemoteException(); @@ -423,9 +423,7 @@ public class MasterProcedureScheduler extends AbstractProcedureScheduler { } } else if (proc instanceof PeerProcedureInterface) { PeerProcedureInterface iProcPeer = (PeerProcedureInterface) proc; - if 
(iProcPeer.getPeerOperationType() == PeerOperationType.REMOVE) { -removePeerQueue(iProcPeer.getPeerId()); - } + tryCleanupPeerQueue(iProcPeer.getPeerId(), proc); } else { // No cleanup for ServerProcedureInterface types, yet. return; @@ -514,6 +512,23 @@ public class MasterProcedureScheduler extends AbstractProcedureScheduler { locking.removePeerLock(peerId); } + private void tryCleanupPeerQueue(String peerId, Procedure procedure) { +schedLock(); +try { + PeerQueue queue = AvlTree.get(peerMap, peerId, PEER_QUEUE_KEY_COMPARATOR); + if (queue == null) { +return; + } + + final LockAndQueue lock = locking.getPeerLock(peerId); + if (queue.isEmpty() && lock.tryExclusiveLock(procedure)) { +removeFromRunQueue(peerRunQueue, queue); +removePeerQueue(peerId); + } +} finally { + schedUnlock(); +} + } private static boolean isPeerProcedure(Procedure proc) { return proc instanceof PeerProcedureInterface;
[36/50] [abbrv] hbase git commit: HBASE-19633 Clean up the replication queues in the postPeerModification stage when removing a peer
HBASE-19633 Clean up the replication queues in the postPeerModification stage when removing a peer Project: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/16d080c1 Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/16d080c1 Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/16d080c1 Branch: refs/heads/HBASE-19397-branch-2 Commit: 16d080c1ad044a4eb1043e78263630bb36f46bf9 Parents: a791c23 Author: zhangduoAuthored: Tue Jan 2 09:57:23 2018 +0800 Committer: zhangduo Committed: Tue Jan 23 18:19:45 2018 +0800 -- .../replication/ReplicationPeerConfig.java | 2 +- .../replication/VerifyReplication.java | 34 ++--- .../hbase/replication/ReplicationPeers.java | 32 ++-- .../replication/ZKReplicationQueueStorage.java | 3 +- .../replication/ZKReplicationStorageBase.java | 4 +- .../replication/TestReplicationStateBasic.java | 10 + .../master/replication/AddPeerProcedure.java| 5 +-- .../replication/DisablePeerProcedure.java | 3 +- .../master/replication/EnablePeerProcedure.java | 3 +- .../master/replication/ModifyPeerProcedure.java | 34 + .../replication/RefreshPeerProcedure.java | 17 - .../master/replication/RemovePeerProcedure.java | 7 ++-- .../replication/ReplicationPeerManager.java | 31 +++- .../replication/UpdatePeerConfigProcedure.java | 3 +- .../RemoteProcedureResultReporter.java | 3 +- .../regionserver/RefreshPeerCallable.java | 5 ++- .../regionserver/ReplicationSourceManager.java | 39 +++- .../TestReplicationAdminUsingProcedure.java | 7 ++-- 18 files changed, 124 insertions(+), 118 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/16d080c1/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java -- diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java index b80ee16..fdae288 100644 --- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java @@ -27,8 +27,8 @@ import java.util.Set; import java.util.TreeMap; import org.apache.hadoop.hbase.TableName; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; /** * A configuration for the replication peer cluster. http://git-wip-us.apache.org/repos/asf/hbase/blob/16d080c1/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java -- diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java index f0070f0..fe45762 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java @@ -21,7 +21,6 @@ package org.apache.hadoop.hbase.mapreduce.replication; import java.io.IOException; import java.util.Arrays; import java.util.UUID; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; import org.apache.hadoop.fs.FileSystem; @@ -45,13 +44,14 @@ import org.apache.hadoop.hbase.filter.PrefixFilter; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.mapreduce.TableInputFormat; import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil; -import org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormat; import org.apache.hadoop.hbase.mapreduce.TableMapper; +import org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormat; import org.apache.hadoop.hbase.mapreduce.TableSplit; import org.apache.hadoop.hbase.replication.ReplicationException; -import 
org.apache.hadoop.hbase.replication.ReplicationFactory; import org.apache.hadoop.hbase.replication.ReplicationPeerConfig; +import org.apache.hadoop.hbase.replication.ReplicationPeerStorage; import org.apache.hadoop.hbase.replication.ReplicationPeers; +import org.apache.hadoop.hbase.replication.ReplicationStorageFactory; import org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.FSUtils; @@ -66,6 +66,7 @@ import org.apache.hadoop.util.Tool; import
[46/50] [abbrv] hbase git commit: HBASE-19719 Fix checkstyle issues
HBASE-19719 Fix checkstyle issues Project: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/2aff1edf Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/2aff1edf Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/2aff1edf Branch: refs/heads/HBASE-19397-branch-2 Commit: 2aff1edf39d161bcbf3a3d332dedb1ad809ccf4e Parents: e4687c9 Author: zhangduoAuthored: Sat Jan 6 08:30:55 2018 +0800 Committer: zhangduo Committed: Tue Jan 23 18:20:38 2018 +0800 -- .../hbase/replication/ReplicationStorageFactory.java | 2 +- .../master/assignment/RegionTransitionProcedure.java | 4 ++-- .../hbase/master/procedure/RSProcedureDispatcher.java | 13 ++--- .../master/ReplicationPeerConfigUpgrader.java | 8 4 files changed, 13 insertions(+), 14 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/2aff1edf/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationStorageFactory.java -- diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationStorageFactory.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationStorageFactory.java index 60d0749..462cfed 100644 --- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationStorageFactory.java +++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationStorageFactory.java @@ -27,7 +27,7 @@ import org.apache.yetus.audience.InterfaceAudience; * For now we only have zk based implementation. 
*/ @InterfaceAudience.Private -public class ReplicationStorageFactory { +public final class ReplicationStorageFactory { private ReplicationStorageFactory() { } http://git-wip-us.apache.org/repos/asf/hbase/blob/2aff1edf/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionTransitionProcedure.java -- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionTransitionProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionTransitionProcedure.java index 1724a38..8277dbe 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionTransitionProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionTransitionProcedure.java @@ -36,11 +36,11 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; + import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RegionTransitionState; import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; - /** * Base class for the Assign and Unassign Procedure. 
* http://git-wip-us.apache.org/repos/asf/hbase/blob/2aff1edf/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.java -- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.java index eb4680c..bfdb682 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.master.procedure; import java.io.IOException; @@ -36,6 +35,12 @@ import org.apache.hadoop.ipc.RemoteException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hbase.thirdparty.com.google.common.collect.ArrayListMultimap; +import org.apache.hbase.thirdparty.com.google.protobuf.ByteString; +import org.apache.hbase.thirdparty.com.google.protobuf.RpcCallback; +import org.apache.hbase.thirdparty.com.google.protobuf.RpcController; +import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService; @@ -47,12 +52,6 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionR
[18/50] [abbrv] hbase git commit: HBASE-19536 Client side changes for moving peer modification from zk watcher to procedure
HBASE-19536 Client side changes for moving peer modification from zk watcher to procedure Signed-off-by: zhangduoProject: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/4482adc0 Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/4482adc0 Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/4482adc0 Branch: refs/heads/HBASE-19397-branch-2 Commit: 4482adc00fd429d6c84d939b8e83484a5a00b5b1 Parents: e8dbb5d Author: Guanghao Zhang Authored: Tue Dec 19 15:50:57 2017 +0800 Committer: zhangduo Committed: Tue Jan 23 18:18:00 2018 +0800 -- .../org/apache/hadoop/hbase/client/Admin.java | 87 ++- .../apache/hadoop/hbase/client/HBaseAdmin.java | 149 ++- .../hadoop/hbase/client/RawAsyncHBaseAdmin.java | 82 +- 3 files changed, 238 insertions(+), 80 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/4482adc0/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java -- diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java index 40dac2f..b8546fa 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java @@ -2473,7 +2473,7 @@ public interface Admin extends Abortable, Closeable { /** * Add a new replication peer for replicating data to slave cluster. * @param peerId a short name that identifies the peer - * @param peerConfig configuration for the replication slave cluster + * @param peerConfig configuration for the replication peer * @throws IOException if a remote or network exception occurs */ default void addReplicationPeer(String peerId, ReplicationPeerConfig peerConfig) @@ -2484,7 +2484,7 @@ public interface Admin extends Abortable, Closeable { /** * Add a new replication peer for replicating data to slave cluster. 
* @param peerId a short name that identifies the peer - * @param peerConfig configuration for the replication slave cluster + * @param peerConfig configuration for the replication peer * @param enabled peer state, true if ENABLED and false if DISABLED * @throws IOException if a remote or network exception occurs */ @@ -2492,6 +2492,37 @@ public interface Admin extends Abortable, Closeable { throws IOException; /** + * Add a new replication peer but does not block and wait for it. + * + * You can use Future.get(long, TimeUnit) to wait on the operation to complete. It may throw + * ExecutionException if there was an error while executing the operation or TimeoutException in + * case the wait timeout was not long enough to allow the operation to complete. + * @param peerId a short name that identifies the peer + * @param peerConfig configuration for the replication peer + * @return the result of the async operation + * @throws IOException IOException if a remote or network exception occurs + */ + default Future addReplicationPeerAsync(String peerId, ReplicationPeerConfig peerConfig) + throws IOException { +return addReplicationPeerAsync(peerId, peerConfig, true); + } + + /** + * Add a new replication peer but does not block and wait for it. + * + * You can use Future.get(long, TimeUnit) to wait on the operation to complete. It may throw + * ExecutionException if there was an error while executing the operation or TimeoutException in + * case the wait timeout was not long enough to allow the operation to complete. 
+ * @param peerId a short name that identifies the peer + * @param peerConfig configuration for the replication peer + * @param enabled peer state, true if ENABLED and false if DISABLED + * @return the result of the async operation + * @throws IOException IOException if a remote or network exception occurs + */ + Future addReplicationPeerAsync(String peerId, ReplicationPeerConfig peerConfig, + boolean enabled) throws IOException; + + /** * Remove a peer and stop the replication. * @param peerId a short name that identifies the peer * @throws IOException if a remote or network exception occurs @@ -2499,6 +2530,18 @@ public interface Admin extends Abortable, Closeable { void removeReplicationPeer(String peerId) throws IOException; /** + * Remove a replication peer but does not block and wait for it. + * + * You can use Future.get(long, TimeUnit) to wait on the operation to complete. It may throw + * ExecutionException if there was an error while executing the operation or TimeoutException in + * case the wait timeout was not long enough to
[24/50] [abbrv] hbase git commit: HBASE-19599 Remove ReplicationQueuesClient, use ReplicationQueueStorage directly
http://git-wip-us.apache.org/repos/asf/hbase/blob/0a754e99/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java -- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java index 6e27a21..d8f9625 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java @@ -21,13 +21,13 @@ import java.io.FileNotFoundException; import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; +import java.util.HashSet; import java.util.LinkedList; import java.util.List; import java.util.Map; import java.util.Queue; import java.util.Set; import java.util.stream.Collectors; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; import org.apache.hadoop.fs.FileStatus; @@ -48,17 +48,18 @@ import org.apache.hadoop.hbase.replication.ReplicationPeerConfig; import org.apache.hadoop.hbase.replication.ReplicationPeerDescription; import org.apache.hadoop.hbase.replication.ReplicationPeers; import org.apache.hadoop.hbase.replication.ReplicationQueueInfo; +import org.apache.hadoop.hbase.replication.ReplicationQueueStorage; import org.apache.hadoop.hbase.replication.ReplicationQueues; -import org.apache.hadoop.hbase.replication.ReplicationQueuesClient; -import org.apache.hadoop.hbase.replication.ReplicationQueuesClientArguments; +import org.apache.hadoop.hbase.replication.ReplicationQueuesArguments; +import org.apache.hadoop.hbase.replication.ReplicationStorageFactory; import org.apache.hadoop.hbase.replication.ReplicationTracker; import org.apache.hadoop.hbase.zookeeper.ZKUtil; import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.hadoop.util.Tool; import 
org.apache.hadoop.util.ToolRunner; -import org.apache.zookeeper.KeeperException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.util.concurrent.AtomicLongMap; /** @@ -303,57 +304,53 @@ public class DumpReplicationQueues extends Configured implements Tool { } public String dumpQueues(ClusterConnection connection, ZKWatcher zkw, Set peerIds, - boolean hdfs) throws Exception { -ReplicationQueuesClient queuesClient; + boolean hdfs) throws Exception { +ReplicationQueueStorage queueStorage; ReplicationPeers replicationPeers; ReplicationQueues replicationQueues; ReplicationTracker replicationTracker; -ReplicationQueuesClientArguments replicationArgs = -new ReplicationQueuesClientArguments(getConf(), new WarnOnlyAbortable(), zkw); +ReplicationQueuesArguments replicationArgs = +new ReplicationQueuesArguments(getConf(), new WarnOnlyAbortable(), zkw); StringBuilder sb = new StringBuilder(); -queuesClient = ReplicationFactory.getReplicationQueuesClient(replicationArgs); -queuesClient.init(); +queueStorage = ReplicationStorageFactory.getReplicationQueueStorage(zkw, getConf()); replicationQueues = ReplicationFactory.getReplicationQueues(replicationArgs); -replicationPeers = ReplicationFactory.getReplicationPeers(zkw, getConf(), queuesClient, connection); +replicationPeers = +ReplicationFactory.getReplicationPeers(zkw, getConf(), queueStorage, connection); replicationTracker = ReplicationFactory.getReplicationTracker(zkw, replicationPeers, getConf(), new WarnOnlyAbortable(), new WarnOnlyStoppable()); -List liveRegionServers = replicationTracker.getListOfRegionServers(); +Set liveRegionServers = new HashSet<>(replicationTracker.getListOfRegionServers()); // Loops each peer on each RS and dumps the queues -try { - List regionservers = queuesClient.getListOfReplicators(); - if (regionservers == null || regionservers.isEmpty()) { -return sb.toString(); +List regionservers = queueStorage.getListOfReplicators(); +if 
(regionservers == null || regionservers.isEmpty()) { + return sb.toString(); +} +for (ServerName regionserver : regionservers) { + List queueIds = queueStorage.getAllQueues(regionserver); + replicationQueues.init(regionserver.getServerName()); + if (!liveRegionServers.contains(regionserver.getServerName())) { +deadRegionServers.add(regionserver.getServerName()); } - for (String regionserver : regionservers) { -List queueIds = queuesClient.getAllQueues(regionserver); -replicationQueues.init(regionserver); -if (!liveRegionServers.contains(regionserver)) { - deadRegionServers.add(regionserver); -} -for (String
[03/50] [abbrv] hbase git commit: HBASE-19840 Flakey TestMetaWithReplicas
HBASE-19840 Flakey TestMetaWithReplicas Adding debuging Project: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/1e5fc1ed Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/1e5fc1ed Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/1e5fc1ed Branch: refs/heads/HBASE-19397-branch-2 Commit: 1e5fc1ed63039390ecd5d3a45c96232338e7ae83 Parents: 0fa24dd Author: Michael Stack Authored: Mon Jan 22 12:24:52 2018 -0800 Committer: Michael Stack Committed: Mon Jan 22 12:26:03 2018 -0800 -- .../hadoop/hbase/client/TestMetaWithReplicas.java| 15 --- 1 file changed, 12 insertions(+), 3 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/1e5fc1ed/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicas.java -- diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicas.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicas.java index e01c28f..7099408 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicas.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicas.java @@ -30,6 +30,8 @@ import java.util.Collection; import java.util.EnumSet; import java.util.List; import java.util.concurrent.ExecutorService; +import java.util.concurrent.atomic.AtomicBoolean; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.CategoryBasedTimeout; @@ -93,14 +95,14 @@ public class TestMetaWithReplicas { // disable the balancer LoadBalancerTracker l = new LoadBalancerTracker(TEST_UTIL.getZooKeeperWatcher(), new Abortable() { - boolean aborted = false; + AtomicBoolean aborted = new AtomicBoolean(false); @Override public boolean isAborted() { -return aborted; +return aborted.get(); } @Override public void abort(String why, Throwable e) { -aborted = true; +aborted.set(true); } }); l.setBalancerOn(false); @@ 
-174,6 +176,7 @@ public class TestMetaWithReplicas { conf.get("zookeeper.znode.metaserver", "meta-region-server")); byte[] data = ZKUtil.getData(zkw, primaryMetaZnode); ServerName primary = ProtobufUtil.toServerName(data); +LOG.info("Primary=" + primary.toString()); TableName TABLE = TableName.valueOf("testShutdownHandling"); byte[][] FAMILIES = new byte[][] { Bytes.toBytes("foo") }; @@ -208,14 +211,17 @@ public class TestMetaWithReplicas { master = util.getHBaseClusterInterface().getClusterMetrics().getMasterName(); // kill the master so that regionserver recovery is not triggered at all // for the meta server +LOG.info("Stopping master=" + master.toString()); util.getHBaseClusterInterface().stopMaster(master); util.getHBaseClusterInterface().waitForMasterToStop(master, 6); +LOG.info("Master stopped!"); if (!master.equals(primary)) { util.getHBaseClusterInterface().killRegionServer(primary); util.getHBaseClusterInterface().waitForRegionServerToStop(primary, 6); } ((ClusterConnection)c).clearRegionCache(); } + LOG.info("Running GETs"); Get get = null; Result r = null; byte[] row = "test".getBytes(); @@ -231,12 +237,15 @@ public class TestMetaWithReplicas { assertTrue(Arrays.equals(r.getRow(), row)); // now start back the killed servers and disable use of replicas. That would mean // calls go to the primary +LOG.info("Starting Master"); util.getHBaseClusterInterface().startMaster(master.getHostname(), 0); util.getHBaseClusterInterface().startRegionServer(primary.getHostname(), 0); util.getHBaseClusterInterface().waitForActiveAndReadyMaster(); +LOG.info("Master active!"); ((ClusterConnection)c).clearRegionCache(); } conf.setBoolean(HConstants.USE_META_REPLICAS, false); + LOG.info("Running GETs no replicas"); try (Table htable = c.getTable(TABLE);) { r = htable.get(get); assertTrue(Arrays.equals(r.getRow(), row));
[42/50] [abbrv] hbase git commit: HBASE-19687 Move the logic in ReplicationZKNodeCleaner to ReplicationChecker and remove ReplicationZKNodeCleanerChore
HBASE-19687 Move the logic in ReplicationZKNodeCleaner to ReplicationChecker and remove ReplicationZKNodeCleanerChore Project: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/ba414a70 Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/ba414a70 Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/ba414a70 Branch: refs/heads/HBASE-19397-branch-2 Commit: ba414a70922aa80a26e4751daf9d951b8391c87f Parents: 4caceda Author: zhangduo Authored: Wed Jan 3 09:39:44 2018 +0800 Committer: zhangduo Committed: Tue Jan 23 18:19:45 2018 +0800 -- .../replication/VerifyReplication.java | 6 +- .../hbase/replication/ReplicationPeers.java | 26 +-- .../hbase/replication/ReplicationUtils.java | 69 +++ .../replication/TestReplicationStateBasic.java | 2 +- .../org/apache/hadoop/hbase/master/HMaster.java | 13 -- .../cleaner/ReplicationZKNodeCleaner.java | 192 --- .../cleaner/ReplicationZKNodeCleanerChore.java | 54 -- .../replication/ReplicationPeerManager.java | 18 +- .../org/apache/hadoop/hbase/util/HBaseFsck.java | 13 +- .../hbase/util/hbck/ReplicationChecker.java | 109 +++ .../cleaner/TestReplicationZKNodeCleaner.java | 109 --- .../hbase/util/TestHBaseFsckReplication.java| 101 ++ .../hadoop/hbase/util/hbck/HbckTestingUtil.java | 6 +- 13 files changed, 259 insertions(+), 459 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/ba414a70/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java -- diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java index fe45762..fac4875 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java @@ -50,8 +50,8 @@ import 
org.apache.hadoop.hbase.mapreduce.TableSplit; import org.apache.hadoop.hbase.replication.ReplicationException; import org.apache.hadoop.hbase.replication.ReplicationPeerConfig; import org.apache.hadoop.hbase.replication.ReplicationPeerStorage; -import org.apache.hadoop.hbase.replication.ReplicationPeers; import org.apache.hadoop.hbase.replication.ReplicationStorageFactory; +import org.apache.hadoop.hbase.replication.ReplicationUtils; import org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.FSUtils; @@ -345,10 +345,10 @@ public class VerifyReplication extends Configured implements Tool { } }); ReplicationPeerStorage storage = - ReplicationStorageFactory.getReplicationPeerStorage(localZKW, conf); +ReplicationStorageFactory.getReplicationPeerStorage(localZKW, conf); ReplicationPeerConfig peerConfig = storage.getPeerConfig(peerId); return Pair.newPair(peerConfig, -ReplicationPeers.getPeerClusterConfiguration(peerConfig, conf)); +ReplicationUtils.getPeerClusterConfiguration(peerConfig, conf)); } catch (ReplicationException e) { throw new IOException("An error occurred while trying to connect to the remove peer cluster", e); http://git-wip-us.apache.org/repos/asf/hbase/blob/ba414a70/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java -- diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java index 45940a5..fcbc350 100644 --- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java +++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java @@ -17,14 +17,11 @@ */ package org.apache.hadoop.hbase.replication; -import java.io.IOException; import java.util.Collections; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import 
java.util.concurrent.ConcurrentMap; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.CompoundConfiguration; -import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.replication.ReplicationPeer.PeerState; import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.yetus.audience.InterfaceAudience; @@ -106,25 +103,6 @@ public class ReplicationPeers { return
[25/50] [abbrv] hbase git commit: HBASE-19599 Remove ReplicationQueuesClient, use ReplicationQueueStorage directly
HBASE-19599 Remove ReplicationQueuesClient, use ReplicationQueueStorage directly Project: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/0a754e99 Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/0a754e99 Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/0a754e99 Branch: refs/heads/HBASE-19397-branch-2 Commit: 0a754e998b2c5c43485c848cf0340254e0924179 Parents: 7213a0d Author: zhangduo Authored: Mon Dec 25 18:49:56 2017 +0800 Committer: zhangduo Committed: Tue Jan 23 18:18:00 2018 +0800 -- .../hbase/replication/ReplicationFactory.java | 19 +- .../replication/ReplicationPeersZKImpl.java | 21 +- .../replication/ReplicationQueueStorage.java| 26 +- .../replication/ReplicationQueuesClient.java| 93 - .../ReplicationQueuesClientArguments.java | 40 -- .../ReplicationQueuesClientZKImpl.java | 176 - .../replication/ZKReplicationQueueStorage.java | 90 - .../replication/TestReplicationStateBasic.java | 378 +++ .../replication/TestReplicationStateZKImpl.java | 148 .../TestZKReplicationQueueStorage.java | 74 .../cleaner/ReplicationZKNodeCleaner.java | 71 ++-- .../cleaner/ReplicationZKNodeCleanerChore.java | 5 +- .../replication/ReplicationPeerManager.java | 31 +- .../master/ReplicationHFileCleaner.java | 109 ++ .../master/ReplicationLogCleaner.java | 35 +- .../regionserver/DumpReplicationQueues.java | 78 ++-- .../hbase/util/hbck/ReplicationChecker.java | 14 +- .../client/TestAsyncReplicationAdminApi.java| 31 +- .../replication/TestReplicationAdmin.java | 2 + .../hbase/master/cleaner/TestLogsCleaner.java | 30 +- .../cleaner/TestReplicationHFileCleaner.java| 59 +-- .../cleaner/TestReplicationZKNodeCleaner.java | 12 +- .../replication/TestReplicationStateBasic.java | 378 --- .../replication/TestReplicationStateZKImpl.java | 227 --- .../TestReplicationSourceManagerZkImpl.java | 84 ++--- 25 files changed, 905 insertions(+), 1326 deletions(-) -- 
http://git-wip-us.apache.org/repos/asf/hbase/blob/0a754e99/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java -- diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java index 9f4ad18..6c1c213 100644 --- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java +++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java @@ -1,5 +1,4 @@ -/* - * +/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -37,20 +36,14 @@ public class ReplicationFactory { args); } - public static ReplicationQueuesClient - getReplicationQueuesClient(ReplicationQueuesClientArguments args) throws Exception { -return (ReplicationQueuesClient) ConstructorUtils -.invokeConstructor(ReplicationQueuesClientZKImpl.class, args); - } - - public static ReplicationPeers getReplicationPeers(final ZKWatcher zk, Configuration conf, - Abortable abortable) { + public static ReplicationPeers getReplicationPeers(ZKWatcher zk, Configuration conf, + Abortable abortable) { return getReplicationPeers(zk, conf, null, abortable); } - public static ReplicationPeers getReplicationPeers(final ZKWatcher zk, Configuration conf, - final ReplicationQueuesClient queuesClient, Abortable abortable) { -return new ReplicationPeersZKImpl(zk, conf, queuesClient, abortable); + public static ReplicationPeers getReplicationPeers(ZKWatcher zk, Configuration conf, + ReplicationQueueStorage queueStorage, Abortable abortable) { +return new ReplicationPeersZKImpl(zk, conf, queueStorage, abortable); } public static ReplicationTracker getReplicationTracker(ZKWatcher zookeeper, 
http://git-wip-us.apache.org/repos/asf/hbase/blob/0a754e99/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java -- diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java index 8ed0150..7de4619 100644 ---
[23/50] [abbrv] hbase git commit: HBASE-19525 RS side changes for moving peer modification from zk watcher to procedure
HBASE-19525 RS side changes for moving peer modification from zk watcher to procedure Project: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/f8fa2fc5 Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/f8fa2fc5 Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/f8fa2fc5 Branch: refs/heads/HBASE-19397-branch-2 Commit: f8fa2fc5160ab60ffaba1cdf2fd6d5f3e35ee88a Parents: bcb79d5 Author: huzheng Authored: Wed Dec 20 10:47:18 2017 +0800 Committer: zhangduo Committed: Tue Jan 23 18:18:00 2018 +0800 -- .../hadoop/hbase/protobuf/ProtobufUtil.java | 11 +- .../hbase/shaded/protobuf/ProtobufUtil.java | 13 +- .../hbase/replication/ReplicationListener.java | 14 -- .../hbase/replication/ReplicationPeer.java | 28 ++- .../replication/ReplicationPeerZKImpl.java | 186 --- .../replication/ReplicationPeersZKImpl.java | 19 +- .../replication/ReplicationTrackerZKImpl.java | 73 +- .../regionserver/ReplicationSourceService.java | 9 +- .../handler/RSProcedureHandler.java | 3 + .../replication/BaseReplicationEndpoint.java| 2 +- .../regionserver/PeerProcedureHandler.java | 38 .../regionserver/PeerProcedureHandlerImpl.java | 81 +++ .../regionserver/RefreshPeerCallable.java | 39 +++- .../replication/regionserver/Replication.java | 10 + .../regionserver/ReplicationSource.java | 8 +- .../regionserver/ReplicationSourceManager.java | 37 ++- .../TestReplicationAdminUsingProcedure.java | 226 +++ .../replication/DummyModifyPeerProcedure.java | 48 .../TestDummyModifyPeerProcedure.java | 80 --- .../TestReplicationTrackerZKImpl.java | 51 - .../TestReplicationSourceManager.java | 32 ++- 21 files changed, 535 insertions(+), 473 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/f8fa2fc5/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java -- diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java index 9739254..f500088 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java @@ -17,6 +17,8 @@ */ package org.apache.hadoop.hbase.protobuf; +import static org.apache.hadoop.hbase.protobuf.ProtobufMagic.PB_MAGIC; + import com.google.protobuf.ByteString; import com.google.protobuf.CodedInputStream; import com.google.protobuf.InvalidProtocolBufferException; @@ -203,7 +205,7 @@ public final class ProtobufUtil { * byte array that is bytes.length plus {@link ProtobufMagic#PB_MAGIC}.length. */ public static byte [] prependPBMagic(final byte [] bytes) { -return Bytes.add(ProtobufMagic.PB_MAGIC, bytes); +return Bytes.add(PB_MAGIC, bytes); } /** @@ -228,10 +230,11 @@ public final class ProtobufUtil { * @param bytes bytes to check * @throws DeserializationException if we are missing the pb magic prefix */ - public static void expectPBMagicPrefix(final byte [] bytes) throws DeserializationException { + public static void expectPBMagicPrefix(final byte[] bytes) throws DeserializationException { if (!isPBMagicPrefix(bytes)) { - throw new DeserializationException("Missing pb magic " + - Bytes.toString(ProtobufMagic.PB_MAGIC) + " prefix"); + String bytesPrefix = bytes == null ? 
"null" : Bytes.toStringBinary(bytes, 0, PB_MAGIC.length); + throw new DeserializationException( + "Missing pb magic " + Bytes.toString(PB_MAGIC) + " prefix, bytes: " + bytesPrefix); } } http://git-wip-us.apache.org/repos/asf/hbase/blob/f8fa2fc5/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java -- diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java index b26802f..5e6b3db 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java @@ -17,6 +17,8 @@ */ package org.apache.hadoop.hbase.shaded.protobuf; +import static org.apache.hadoop.hbase.protobuf.ProtobufMagic.PB_MAGIC; + import java.io.ByteArrayOutputStream; import java.io.IOException; import java.io.InputStream; @@ -280,7 +282,7 @@ public final class ProtobufUtil { * byte array
[12/50] [abbrv] hbase git commit: HBASE-19811 Fix findbugs and error-prone warnings in hbase-server (branch-2)
HBASE-19811 Fix findbugs and error-prone warnings in hbase-server (branch-2) Signed-off-by: Michael StackProject: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/b1269ec5 Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/b1269ec5 Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/b1269ec5 Branch: refs/heads/HBASE-19397-branch-2 Commit: b1269ec57ff77f4077516fb4f866e2fe7ede3a3e Parents: 1e5fc1e Author: Peter Somogyi Authored: Mon Jan 22 15:32:06 2018 +0100 Committer: Michael Stack Committed: Mon Jan 22 17:14:15 2018 -0800 -- .../hbase/SslRMIServerSocketFactorySecure.java | 1 + .../hadoop/hbase/client/locking/EntityLock.java | 1 + .../hadoop/hbase/conf/ConfigurationManager.java | 2 +- .../ZkSplitLogWorkerCoordination.java | 2 +- .../coprocessor/BaseRowProcessorEndpoint.java | 8 +- .../hbase/coprocessor/CoprocessorHost.java | 1 + .../hbase/coprocessor/ObserverContextImpl.java | 3 + .../coprocessor/ReadOnlyConfiguration.java | 14 +- .../hadoop/hbase/filter/FilterWrapper.java | 1 + .../org/apache/hadoop/hbase/fs/HFileSystem.java | 83 ++-- .../hbase/io/FSDataInputStreamWrapper.java | 11 +- .../hadoop/hbase/io/HalfStoreFileReader.java| 8 + .../org/apache/hadoop/hbase/io/Reference.java | 1 + .../asyncfs/FanOutOneBlockAsyncDFSOutput.java | 1 + .../hadoop/hbase/io/hfile/BlockCache.java | 1 + .../hbase/io/hfile/CompoundBloomFilter.java | 1 + .../hadoop/hbase/io/hfile/FixedFileTrailer.java | 10 +- .../hadoop/hbase/io/hfile/HFileBlock.java | 68 +-- .../hadoop/hbase/io/hfile/HFileReaderImpl.java | 2 +- .../hadoop/hbase/io/hfile/HFileScanner.java | 1 + .../hadoop/hbase/io/hfile/LruBlockCache.java| 4 + .../hadoop/hbase/io/hfile/LruCachedBlock.java | 1 + .../hbase/io/hfile/LruCachedBlockQueue.java | 1 + .../hbase/io/hfile/bucket/BucketAllocator.java | 1 + .../hbase/io/hfile/bucket/BucketCache.java | 18 +- .../hbase/io/hfile/bucket/CachedEntryQueue.java | 1 + .../apache/hadoop/hbase/ipc/BufferChain.java| 2 +- 
.../ipc/FastPathBalancedQueueRpcExecutor.java | 1 + .../hadoop/hbase/ipc/ServerRpcConnection.java | 5 +- .../hadoop/hbase/ipc/SimpleRpcServer.java | 1 + .../hbase/master/ClusterStatusPublisher.java| 9 +- .../apache/hadoop/hbase/master/DeadServer.java | 1 + .../hadoop/hbase/master/HMasterCommandLine.java | 3 +- .../hadoop/hbase/master/LoadBalancer.java | 1 + ...MasterAnnotationReadingPriorityFunction.java | 1 + .../hbase/master/MasterCoprocessorHost.java | 1 + .../hadoop/hbase/master/MobCompactionChore.java | 2 +- .../hbase/master/RegionServerTracker.java | 2 +- .../hadoop/hbase/master/SplitLogManager.java| 2 +- .../assignment/MergeTableRegionsProcedure.java | 2 +- .../hbase/master/assignment/RegionStates.java | 7 +- .../balancer/FavoredStochasticBalancer.java | 5 +- .../master/balancer/RegionLocationFinder.java | 27 +- .../master/balancer/SimpleLoadBalancer.java | 1 + .../master/balancer/StochasticLoadBalancer.java | 6 +- .../hbase/master/cleaner/CleanerChore.java | 4 +- .../hbase/master/cleaner/HFileCleaner.java | 2 +- .../hadoop/hbase/master/cleaner/LogCleaner.java | 2 +- .../hbase/master/locking/LockProcedure.java | 2 +- .../master/procedure/ProcedurePrepareLatch.java | 4 + .../master/procedure/RSProcedureDispatcher.java | 9 + .../master/snapshot/SnapshotHFileCleaner.java | 2 + .../apache/hadoop/hbase/mob/CachedMobFile.java | 1 + .../hadoop/hbase/mob/ExpiredMobFileCleaner.java | 1 + .../PartitionedMobCompactionRequest.java| 1 + .../monitoring/MonitoredRPCHandlerImpl.java | 12 + .../hbase/monitoring/MonitoredTaskImpl.java | 1 + .../hadoop/hbase/monitoring/TaskMonitor.java| 2 +- .../hadoop/hbase/procedure/Procedure.java | 2 +- .../hbase/procedure/ProcedureManagerHost.java | 6 +- .../hadoop/hbase/procedure/Subprocedure.java| 1 + .../hbase/procedure/ZKProcedureCoordinator.java | 12 +- .../hbase/procedure/ZKProcedureMemberRpcs.java | 1 + .../hadoop/hbase/procedure/ZKProcedureUtil.java | 1 + .../hadoop/hbase/quotas/MasterQuotaManager.java | 4 +- 
.../hadoop/hbase/quotas/QuotaObserverChore.java | 3 +- .../apache/hadoop/hbase/quotas/RateLimiter.java | 1 + .../quotas/RegionServerSpaceQuotaManager.java | 2 +- .../SpaceQuotaSnapshotNotifierFactory.java | 4 +- .../hbase/regionserver/AbstractMemStore.java| 2 +- .../AdaptiveMemStoreCompactionStrategy.java | 3 + .../regionserver/CellChunkImmutableSegment.java | 2 +- .../hadoop/hbase/regionserver/CellSet.java | 30 ++ .../hbase/regionserver/CompactingMemStore.java | 4 +-
[35/50] [abbrv] hbase git commit: HBASE-19622 Reimplement ReplicationPeers with the new replication storage interface
HBASE-19622 Reimplement ReplicationPeers with the new replication storage interface Project: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a791c238 Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a791c238 Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a791c238 Branch: refs/heads/HBASE-19397-branch-2 Commit: a791c23828731c91feac831afba97943543368cf Parents: 5245829 Author: huzheng Authored: Tue Dec 26 16:46:10 2017 +0800 Committer: zhangduo Committed: Tue Jan 23 18:19:45 2018 +0800 -- .../replication/ReplicationPeerConfigUtil.java | 10 +- .../replication/VerifyReplication.java | 9 +- .../hbase/replication/ReplicationFactory.java | 10 +- .../hbase/replication/ReplicationPeerImpl.java | 60 +- .../replication/ReplicationPeerStorage.java | 3 +- .../hbase/replication/ReplicationPeers.java | 238 .../replication/ReplicationPeersZKImpl.java | 552 --- .../replication/ZKReplicationPeerStorage.java | 12 +- .../replication/ZKReplicationStorageBase.java | 3 +- .../replication/TestReplicationStateBasic.java | 125 ++--- .../replication/TestReplicationStateZKImpl.java | 2 +- .../TestZKReplicationPeerStorage.java | 12 +- .../cleaner/ReplicationZKNodeCleaner.java | 57 +- .../replication/ReplicationPeerManager.java | 6 +- .../regionserver/DumpReplicationQueues.java | 2 +- .../regionserver/PeerProcedureHandlerImpl.java | 49 +- .../replication/regionserver/Replication.java | 2 +- .../regionserver/ReplicationSource.java | 7 +- .../regionserver/ReplicationSourceManager.java | 45 +- .../cleaner/TestReplicationHFileCleaner.java| 7 +- .../replication/TestMultiSlaveReplication.java | 2 - .../TestReplicationTrackerZKImpl.java | 36 +- .../TestReplicationSourceManager.java | 17 +- .../hadoop/hbase/HBaseZKTestingUtility.java | 3 +- 24 files changed, 308 insertions(+), 961 deletions(-) -- 
http://git-wip-us.apache.org/repos/asf/hbase/blob/a791c238/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java -- diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java index 022bf64..a234a9b 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java @@ -247,22 +247,22 @@ public final class ReplicationPeerConfigUtil { public static ReplicationPeerConfig parsePeerFrom(final byte[] bytes) throws DeserializationException { if (ProtobufUtil.isPBMagicPrefix(bytes)) { - int pblen = ProtobufUtil.lengthOfPBMagic(); + int pbLen = ProtobufUtil.lengthOfPBMagic(); ReplicationProtos.ReplicationPeer.Builder builder = ReplicationProtos.ReplicationPeer.newBuilder(); ReplicationProtos.ReplicationPeer peer; try { -ProtobufUtil.mergeFrom(builder, bytes, pblen, bytes.length - pblen); +ProtobufUtil.mergeFrom(builder, bytes, pbLen, bytes.length - pbLen); peer = builder.build(); } catch (IOException e) { throw new DeserializationException(e); } return convert(peer); } else { - if (bytes.length > 0) { -return ReplicationPeerConfig.newBuilder().setClusterKey(Bytes.toString(bytes)).build(); + if (bytes == null || bytes.length <= 0) { +throw new DeserializationException("Bytes to deserialize should not be empty."); } - return ReplicationPeerConfig.newBuilder().setClusterKey("").build(); + return ReplicationPeerConfig.newBuilder().setClusterKey(Bytes.toString(bytes)).build(); } } http://git-wip-us.apache.org/repos/asf/hbase/blob/a791c238/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java -- diff --git 
a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java index 09d4b4b..f0070f0 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java @@ -339,15 +339,10 @@ public class VerifyReplication extends Configured implements
[09/50] [abbrv] hbase git commit: HBASE-19811 Fix findbugs and error-prone warnings in hbase-server (branch-2)
http://git-wip-us.apache.org/repos/asf/hbase/blob/b1269ec5/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi.java -- diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi.java index dcccfd1..91f7971 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi.java @@ -51,7 +51,6 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.JVMClusterUtil; import org.apache.hadoop.hbase.util.Threads; -import org.junit.Assert; import org.junit.Ignore; import org.junit.Test; import org.junit.experimental.categories.Category; @@ -204,14 +203,14 @@ public class TestAsyncRegionAdminApi extends TestAsyncAdminBase { rs -> { ServerName serverName = rs.getServerName(); try { - Assert.assertEquals(admin.getRegions(serverName).get().size(), rs + assertEquals(admin.getRegions(serverName).get().size(), rs .getRegions().size()); } catch (Exception e) { fail("admin.getOnlineRegions() method throws a exception: " + e.getMessage()); } regionServerCount.incrementAndGet(); }); -Assert.assertEquals(regionServerCount.get(), 2); +assertEquals(2, regionServerCount.get()); } @Test @@ -229,7 +228,7 @@ public class TestAsyncRegionAdminApi extends TestAsyncAdminBase { ASYNC_CONN.getTable(tableName) .put(new Put(hri.getStartKey()).addColumn(FAMILY, FAMILY_0, Bytes.toBytes("value-1"))) .join(); - Assert.assertTrue(regionServer.getOnlineRegion(hri.getRegionName()).getMemStoreSize() > 0); + assertTrue(regionServer.getOnlineRegion(hri.getRegionName()).getMemStoreSize() > 0); // flush region and wait flush operation finished. 
LOG.info("flushing region: " + Bytes.toStringBinary(hri.getRegionName())); admin.flushRegion(hri.getRegionName()).get(); @@ -239,20 +238,20 @@ public class TestAsyncRegionAdminApi extends TestAsyncAdminBase { Threads.sleep(50); } // check the memstore. - Assert.assertEquals(regionServer.getOnlineRegion(hri.getRegionName()).getMemStoreSize(), 0); +assertEquals(0, regionServer.getOnlineRegion(hri.getRegionName()).getMemStoreSize()); // write another put into the specific region ASYNC_CONN.getTable(tableName) .put(new Put(hri.getStartKey()).addColumn(FAMILY, FAMILY_0, Bytes.toBytes("value-2"))) .join(); - Assert.assertTrue(regionServer.getOnlineRegion(hri.getRegionName()).getMemStoreSize() > 0); + assertTrue(regionServer.getOnlineRegion(hri.getRegionName()).getMemStoreSize() > 0); admin.flush(tableName).get(); Threads.sleepWithoutInterrupt(500); while (regionServer.getOnlineRegion(hri.getRegionName()).getMemStoreSize() > 0) { Threads.sleep(50); } // check the memstore. - Assert.assertEquals(regionServer.getOnlineRegion(hri.getRegionName()).getMemStoreSize(), 0); +assertEquals(0, regionServer.getOnlineRegion(hri.getRegionName()).getMemStoreSize()); } @Test @@ -421,7 +420,7 @@ public class TestAsyncRegionAdminApi extends TestAsyncAdminBase { LOG.error(e.toString(), e); } } -assertEquals(count, 2); +assertEquals(2, count); } private void waitUntilMobCompactionFinished(TableName tableName) @@ -471,23 +470,23 @@ public class TestAsyncRegionAdminApi extends TestAsyncAdminBase { .map(rsThread -> rsThread.getRegionServer()).collect(Collectors.toList()); List regions = new ArrayList<>(); rsList.forEach(rs -> regions.addAll(rs.getRegions(tableName))); -Assert.assertEquals(regions.size(), 1); +assertEquals(1, regions.size()); int countBefore = countStoreFilesInFamilies(regions, families); -Assert.assertTrue(countBefore > 0); +assertTrue(countBefore > 0); // Minor compaction for all region servers. 
for (HRegionServer rs : rsList) admin.compactRegionServer(rs.getServerName()).get(); Thread.sleep(5000); int countAfterMinorCompaction = countStoreFilesInFamilies(regions, families); -Assert.assertTrue(countAfterMinorCompaction < countBefore); +assertTrue(countAfterMinorCompaction < countBefore); // Major compaction for all region servers. for (HRegionServer rs : rsList) admin.majorCompactRegionServer(rs.getServerName()).get(); Thread.sleep(5000); int countAfterMajorCompaction = countStoreFilesInFamilies(regions, families); -Assert.assertEquals(countAfterMajorCompaction, 3); +assertEquals(3,
[44/50] [abbrv] hbase git commit: HBASE-19636 All rs should already start work with the new peer change when replication peer procedure is finished
HBASE-19636 All rs should already start work with the new peer change when replication peer procedure is finished Signed-off-by: zhangduoProject: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/aab18b45 Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/aab18b45 Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/aab18b45 Branch: refs/heads/HBASE-19397-branch-2 Commit: aab18b45ba20cea51df861914c12b87755008f7d Parents: 6bd7fd8 Author: Guanghao Zhang Authored: Thu Jan 4 16:58:01 2018 +0800 Committer: zhangduo Committed: Tue Jan 23 18:19:45 2018 +0800 -- .../replication/ReplicationPeerConfig.java | 1 - .../hbase/replication/ReplicationPeerImpl.java | 4 +- .../hbase/replication/ReplicationQueueInfo.java | 23 +- .../hbase/replication/ReplicationUtils.java | 56 ++ .../replication/TestReplicationStateZKImpl.java | 22 - .../regionserver/ReplicationSourceService.java | 3 +- .../regionserver/PeerProcedureHandler.java | 3 + .../regionserver/PeerProcedureHandlerImpl.java | 50 +- .../RecoveredReplicationSource.java | 6 +- .../RecoveredReplicationSourceShipper.java | 8 +- .../replication/regionserver/Replication.java | 15 +- .../regionserver/ReplicationSource.java | 34 +- .../regionserver/ReplicationSourceFactory.java | 4 +- .../ReplicationSourceInterface.java | 8 +- .../regionserver/ReplicationSourceManager.java | 895 ++- .../regionserver/ReplicationSourceShipper.java | 6 +- .../ReplicationSourceWALReader.java | 2 +- .../replication/ReplicationSourceDummy.java | 2 +- .../replication/TestNamespaceReplication.java | 57 +- .../TestReplicationSourceManager.java | 11 +- .../TestReplicationSourceManagerZkImpl.java | 1 - 21 files changed, 659 insertions(+), 552 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/aab18b45/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java -- diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java index fdae288..bf8d030 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java @@ -25,7 +25,6 @@ import java.util.List; import java.util.Map; import java.util.Set; import java.util.TreeMap; - import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; http://git-wip-us.apache.org/repos/asf/hbase/blob/aab18b45/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerImpl.java -- diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerImpl.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerImpl.java index 3e17025..604e0bb 100644 --- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerImpl.java +++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerImpl.java @@ -1,5 +1,4 @@ -/* - * +/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -28,6 +27,7 @@ import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private public class ReplicationPeerImpl implements ReplicationPeer { + private final Configuration conf; private final String id; http://git-wip-us.apache.org/repos/asf/hbase/blob/aab18b45/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueueInfo.java -- diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueueInfo.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueueInfo.java index ecd888f..cd65f9b 100644 --- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueueInfo.java +++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueueInfo.java @@ -29,7 +29,7 @@ import org.slf4j.LoggerFactory; import org.apache.hadoop.hbase.ServerName; /** - * This class is responsible for the parsing logic for a znode representing a queue. + * This class is
[49/50] [abbrv] hbase git commit: HBASE-19783 Change replication peer cluster key/endpoint from a not-null value to null is not allowed
HBASE-19783 Change replication peer cluster key/endpoint from a not-null value to null is not allowed Signed-off-by: zhangduoProject: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/7aaad6f4 Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/7aaad6f4 Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/7aaad6f4 Branch: refs/heads/HBASE-19397-branch-2 Commit: 7aaad6f4c024019c12e47d380b135b00feed596d Parents: 2198683 Author: Guanghao Zhang Authored: Fri Jan 12 22:04:38 2018 +0800 Committer: zhangduo Committed: Tue Jan 23 18:20:38 2018 +0800 -- .../replication/ReplicationPeerManager.java | 28 +--- 1 file changed, 19 insertions(+), 9 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/7aaad6f4/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java -- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java index 696b2d7..19fc7f4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java @@ -132,20 +132,19 @@ public class ReplicationPeerManager { checkPeerConfig(peerConfig); ReplicationPeerDescription desc = checkPeerExists(peerId); ReplicationPeerConfig oldPeerConfig = desc.getPeerConfig(); -if (!StringUtils.isBlank(peerConfig.getClusterKey()) && - !peerConfig.getClusterKey().equals(oldPeerConfig.getClusterKey())) { +if (!isStringEquals(peerConfig.getClusterKey(), oldPeerConfig.getClusterKey())) { throw new DoNotRetryIOException( "Changing the cluster key on an existing peer is not allowed. 
Existing key '" + -oldPeerConfig.getClusterKey() + "' for peer " + peerId + " does not match new key '" + -peerConfig.getClusterKey() + "'"); + oldPeerConfig.getClusterKey() + "' for peer " + peerId + " does not match new key '" + + peerConfig.getClusterKey() + "'"); } -if (!StringUtils.isBlank(peerConfig.getReplicationEndpointImpl()) && - !peerConfig.getReplicationEndpointImpl().equals(oldPeerConfig.getReplicationEndpointImpl())) { +if (!isStringEquals(peerConfig.getReplicationEndpointImpl(), + oldPeerConfig.getReplicationEndpointImpl())) { throw new DoNotRetryIOException("Changing the replication endpoint implementation class " + -"on an existing peer is not allowed. Existing class '" + -oldPeerConfig.getReplicationEndpointImpl() + "' for peer " + peerId + -" does not match new class '" + peerConfig.getReplicationEndpointImpl() + "'"); + "on an existing peer is not allowed. Existing class '" + + oldPeerConfig.getReplicationEndpointImpl() + "' for peer " + peerId + + " does not match new class '" + peerConfig.getReplicationEndpointImpl() + "'"); } } @@ -341,4 +340,15 @@ public class ReplicationPeerManager { return new ReplicationPeerManager(peerStorage, ReplicationStorageFactory.getReplicationQueueStorage(zk, conf), peers); } + + /** + * For replication peer cluster key or endpoint class, null and empty string is same. So here + * don't use {@link StringUtils#equals(CharSequence, CharSequence)} directly. + */ + private boolean isStringEquals(String s1, String s2) { +if (StringUtils.isBlank(s1)) { + return StringUtils.isBlank(s2); +} +return s1.equals(s2); + } }
[07/50] [abbrv] hbase git commit: HBASE-19811 Fix findbugs and error-prone warnings in hbase-server (branch-2)
http://git-wip-us.apache.org/repos/asf/hbase/blob/b1269ec5/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureSchedulerPerformanceEvaluation.java -- diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureSchedulerPerformanceEvaluation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureSchedulerPerformanceEvaluation.java index 21d914a..e5d3a79 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureSchedulerPerformanceEvaluation.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureSchedulerPerformanceEvaluation.java @@ -102,6 +102,7 @@ public class MasterProcedureSchedulerPerformanceEvaluation extends AbstractHBase this.hri = hri; } +@Override public Procedure newProcedure(long procId) { return new RegionProcedure(procId, hri); } @@ -132,6 +133,7 @@ public class MasterProcedureSchedulerPerformanceEvaluation extends AbstractHBase this.tableName = tableName; } +@Override public Procedure newProcedure(long procId) { return new TableProcedure(procId, tableName); } @@ -196,6 +198,7 @@ public class MasterProcedureSchedulerPerformanceEvaluation extends AbstractHBase private final AtomicLong completed = new AtomicLong(0); private class AddProcsWorker extends Thread { +@Override public void run() { final Random rand = new Random(System.currentTimeMillis()); long procId = procIds.incrementAndGet(); @@ -209,6 +212,7 @@ public class MasterProcedureSchedulerPerformanceEvaluation extends AbstractHBase } private class PollAndLockWorker extends Thread { +@Override public void run() { while (completed.get() < numOps) { // With lock/unlock being ~100ns, and no other workload, 1000ns wait seams reasonable. 
http://git-wip-us.apache.org/repos/asf/hbase/blob/b1269ec5/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestModifyNamespaceProcedure.java -- diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestModifyNamespaceProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestModifyNamespaceProcedure.java index 8dec59d..4adab53 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestModifyNamespaceProcedure.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestModifyNamespaceProcedure.java @@ -99,7 +99,7 @@ public class TestModifyNamespaceProcedure { // Before modify NamespaceDescriptor currentNsDescriptor = UTIL.getAdmin().getNamespaceDescriptor(nsd.getName()); -assertEquals(currentNsDescriptor.getConfigurationValue(nsKey1), nsValue1before); +assertEquals(nsValue1before, currentNsDescriptor.getConfigurationValue(nsKey1)); assertNull(currentNsDescriptor.getConfigurationValue(nsKey2)); // Update @@ -115,8 +115,8 @@ public class TestModifyNamespaceProcedure { // Verify the namespace is updated. 
currentNsDescriptor = UTIL.getAdmin().getNamespaceDescriptor(nsd.getName()); -assertEquals(nsd.getConfigurationValue(nsKey1), nsValue1after); -assertEquals(currentNsDescriptor.getConfigurationValue(nsKey2), nsValue2); +assertEquals(nsValue1after, nsd.getConfigurationValue(nsKey1)); +assertEquals(nsValue2, currentNsDescriptor.getConfigurationValue(nsKey2)); } @Test(timeout=6) @@ -219,7 +219,7 @@ public class TestModifyNamespaceProcedure { // Validate NamespaceDescriptor currentNsDescriptor = UTIL.getAdmin().getNamespaceDescriptor(nsd.getName()); -assertEquals(currentNsDescriptor.getConfigurationValue(nsKey), nsValue); +assertEquals(nsValue, currentNsDescriptor.getConfigurationValue(nsKey)); } @Test(timeout = 6) http://git-wip-us.apache.org/repos/asf/hbase/blob/b1269ec5/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestModifyTableProcedure.java -- diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestModifyTableProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestModifyTableProcedure.java index 8b58646..24a6bc5 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestModifyTableProcedure.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestModifyTableProcedure.java @@ -33,6 +33,7 @@ import org.apache.hadoop.hbase.procedure2.ProcedureExecutor; import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility; import org.apache.hadoop.hbase.testclassification.MasterTests; import
[34/50] [abbrv] hbase git commit: HBASE-19622 Reimplement ReplicationPeers with the new replication storage interface
http://git-wip-us.apache.org/repos/asf/hbase/blob/a791c238/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java -- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java index bf9cd30..1359575 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java @@ -166,7 +166,6 @@ public class ReplicationSourceManager implements ReplicationListener { this.clusterId = clusterId; this.walFileLengthProvider = walFileLengthProvider; this.replicationTracker.registerListener(this); -this.replicationPeers.getAllPeerIds(); // It's preferable to failover 1 RS at a time, but with good zk servers // more could be processed at the same time. int nbWorkers = conf.getInt("replication.executor.workers", 1); @@ -270,8 +269,8 @@ public class ReplicationSourceManager implements ReplicationListener { } List otherRegionServers = replicationTracker.getListOfRegionServers().stream() .map(ServerName::valueOf).collect(Collectors.toList()); -LOG.info( - "Current list of replicators: " + currentReplicators + " other RSs: " + otherRegionServers); +LOG.info("Current list of replicators: " + currentReplicators + " other RSs: " ++ otherRegionServers); // Look if there's anything to process after a restart for (ServerName rs : currentReplicators) { @@ -288,7 +287,7 @@ public class ReplicationSourceManager implements ReplicationListener { * The returned future is for adoptAbandonedQueues task. 
*/ Future init() throws IOException, ReplicationException { -for (String id : this.replicationPeers.getConnectedPeerIds()) { +for (String id : this.replicationPeers.getAllPeerIds()) { addSource(id); if (replicationForBulkLoadDataEnabled) { // Check if peer exists in hfile-refs queue, if not add it. This can happen in the case @@ -307,8 +306,8 @@ public class ReplicationSourceManager implements ReplicationListener { */ @VisibleForTesting ReplicationSourceInterface addSource(String id) throws IOException, ReplicationException { -ReplicationPeerConfig peerConfig = replicationPeers.getReplicationPeerConfig(id); -ReplicationPeer peer = replicationPeers.getConnectedPeer(id); +ReplicationPeerConfig peerConfig = replicationPeers.getPeerConfig(id); +ReplicationPeer peer = replicationPeers.getPeer(id); ReplicationSourceInterface src = getReplicationSource(id, peerConfig, peer); synchronized (this.walsById) { this.sources.add(src); @@ -354,7 +353,7 @@ public class ReplicationSourceManager implements ReplicationListener { public void deleteSource(String peerId, boolean closeConnection) { abortWhenFail(() -> this.queueStorage.removeQueue(server.getServerName(), peerId)); if (closeConnection) { - this.replicationPeers.peerDisconnected(peerId); + this.replicationPeers.removePeer(peerId); } } @@ -445,12 +444,12 @@ public class ReplicationSourceManager implements ReplicationListener { // update replication queues on ZK // synchronize on replicationPeers to avoid adding source for the to-be-removed peer synchronized (replicationPeers) { - for (String id : replicationPeers.getConnectedPeerIds()) { + for (String id : replicationPeers.getAllPeerIds()) { try { this.queueStorage.addWAL(server.getServerName(), id, logName); } catch (ReplicationException e) { - throw new IOException("Cannot add log to replication queue" + -" when creating a new source, queueId=" + id + ", filename=" + logName, e); + throw new IOException("Cannot add log to replication queue" + + " when creating a new 
source, queueId=" + id + ", filename=" + logName, e); } } } @@ -593,7 +592,7 @@ public class ReplicationSourceManager implements ReplicationListener { public void addPeer(String id) throws ReplicationException, IOException { LOG.info("Trying to add peer, peerId: " + id); -boolean added = this.replicationPeers.peerConnected(id); +boolean added = this.replicationPeers.addPeer(id); if (added) { LOG.info("Peer " + id + " connected success, trying to start the replication source thread."); addSource(id); @@ -729,19 +728,25 @@ public class ReplicationSourceManager implements ReplicationListener { // there is not an actual peer defined corresponding to peerId for the failover. ReplicationQueueInfo replicationQueueInfo = new ReplicationQueueInfo(peerId);
[22/50] [abbrv] hbase git commit: HBASE-19543 Abstract a replication storage interface to extract the zk specific code
HBASE-19543 Abstract a replication storage interface to extract the zk specific code Project: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/7213a0dc Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/7213a0dc Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/7213a0dc Branch: refs/heads/HBASE-19397-branch-2 Commit: 7213a0dc74b4ff90e05e3e40f2cc3362d9d3fd6c Parents: f8fa2fc Author: zhangduoAuthored: Fri Dec 22 14:37:28 2017 +0800 Committer: zhangduo Committed: Tue Jan 23 18:18:00 2018 +0800 -- .../hadoop/hbase/util/CollectionUtils.java | 3 + hbase-replication/pom.xml | 12 + .../replication/ReplicationPeerStorage.java | 74 .../replication/ReplicationQueueStorage.java| 164 +++ .../replication/ReplicationStateZKBase.java | 1 - .../replication/ReplicationStorageFactory.java | 49 +++ .../replication/ZKReplicationPeerStorage.java | 164 +++ .../replication/ZKReplicationQueueStorage.java | 425 +++ .../replication/ZKReplicationStorageBase.java | 75 .../TestZKReplicationPeerStorage.java | 171 .../TestZKReplicationQueueStorage.java | 171 .../org/apache/hadoop/hbase/master/HMaster.java | 36 +- .../hadoop/hbase/master/MasterServices.java | 6 +- .../master/procedure/MasterProcedureEnv.java| 24 +- .../master/replication/AddPeerProcedure.java| 6 +- .../replication/DisablePeerProcedure.java | 7 +- .../master/replication/EnablePeerProcedure.java | 6 +- .../master/replication/ModifyPeerProcedure.java | 41 +- .../master/replication/RemovePeerProcedure.java | 6 +- .../master/replication/ReplicationManager.java | 199 - .../replication/ReplicationPeerManager.java | 331 +++ .../replication/UpdatePeerConfigProcedure.java | 7 +- .../replication/TestReplicationAdmin.java | 63 ++- .../hbase/master/MockNoopMasterServices.java| 13 +- .../hbase/master/TestMasterNoCluster.java | 4 +- .../TestReplicationDisableInactivePeer.java | 6 +- 26 files changed, 1750 insertions(+), 314 deletions(-) -- 
http://git-wip-us.apache.org/repos/asf/hbase/blob/7213a0dc/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CollectionUtils.java -- diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CollectionUtils.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CollectionUtils.java index 875b124..8bbb6f1 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CollectionUtils.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CollectionUtils.java @@ -107,6 +107,9 @@ public class CollectionUtils { return list.get(list.size() - 1); } + public static List nullToEmpty(List list) { +return list != null ? list : Collections.emptyList(); + } /** * In HBASE-16648 we found that ConcurrentHashMap.get is much faster than computeIfAbsent if the * value already exists. Notice that the implementation does not guarantee that the supplier will http://git-wip-us.apache.org/repos/asf/hbase/blob/7213a0dc/hbase-replication/pom.xml -- diff --git a/hbase-replication/pom.xml b/hbase-replication/pom.xml index bd593d3..b28e852 100644 --- a/hbase-replication/pom.xml +++ b/hbase-replication/pom.xml @@ -121,6 +121,18 @@ org.apache.hbase hbase-zookeeper + + org.apache.hbase + hbase-common + test-jar + test + + + org.apache.hbase + hbase-zookeeper + test-jar + test + org.apache.commons http://git-wip-us.apache.org/repos/asf/hbase/blob/7213a0dc/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerStorage.java -- diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerStorage.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerStorage.java new file mode 100644 index 000..e00cd0d --- /dev/null +++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerStorage.java @@ -0,0 +1,74 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You
[29/50] [abbrv] hbase git commit: HBASE-19642 Fix locking for peer modification procedure
HBASE-19642 Fix locking for peer modification procedure Project: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/f90fcc9e Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/f90fcc9e Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/f90fcc9e Branch: refs/heads/HBASE-19397-branch-2 Commit: f90fcc9e7ae5124dfaf42b416603a8baf1debd48 Parents: 2417c32 Author: zhangduoAuthored: Wed Dec 27 18:27:13 2017 +0800 Committer: zhangduo Committed: Tue Jan 23 18:18:00 2018 +0800 -- .../procedure/MasterProcedureScheduler.java | 14 + .../master/replication/ModifyPeerProcedure.java | 21 +--- 2 files changed, 32 insertions(+), 3 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/f90fcc9e/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java -- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java index a25217c..4ecb3b1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java @@ -610,6 +610,20 @@ public class MasterProcedureScheduler extends AbstractProcedureScheduler { public boolean requireExclusiveLock(Procedure proc) { return requirePeerExclusiveLock((PeerProcedureInterface) proc); } + +@Override +public boolean isAvailable() { + if (isEmpty()) { +return false; + } + if (getLockStatus().hasExclusiveLock()) { +// if we have an exclusive lock already taken +// only child of the lock owner can be executed +Procedure nextProc = peek(); +return nextProc != null && getLockStatus().hasLockAccess(nextProc); + } + return true; +} } // 
http://git-wip-us.apache.org/repos/asf/hbase/blob/f90fcc9e/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ModifyPeerProcedure.java -- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ModifyPeerProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ModifyPeerProcedure.java index 279fbc7..a682606 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ModifyPeerProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ModifyPeerProcedure.java @@ -46,6 +46,8 @@ public abstract class ModifyPeerProcedure protected String peerId; + private volatile boolean locked; + // used to keep compatible with old client where we can only returns after updateStorage. protected ProcedurePrepareLatch latch; @@ -145,17 +147,30 @@ public abstract class ModifyPeerProcedure @Override protected LockState acquireLock(MasterProcedureEnv env) { -return env.getProcedureScheduler().waitPeerExclusiveLock(this, peerId) - ? LockState.LOCK_EVENT_WAIT - : LockState.LOCK_ACQUIRED; +if (env.getProcedureScheduler().waitPeerExclusiveLock(this, peerId)) { + return LockState.LOCK_EVENT_WAIT; +} +locked = true; +return LockState.LOCK_ACQUIRED; } @Override protected void releaseLock(MasterProcedureEnv env) { +locked = false; env.getProcedureScheduler().wakePeerExclusiveLock(this, peerId); } @Override + protected boolean holdLock(MasterProcedureEnv env) { +return true; + } + + @Override + protected boolean hasLock(MasterProcedureEnv env) { +return locked; + } + + @Override protected void rollbackState(MasterProcedureEnv env, PeerModificationState state) throws IOException, InterruptedException { if (state == PeerModificationState.PRE_PEER_MODIFICATION) {
[04/50] [abbrv] hbase git commit: HBASE-19811 Fix findbugs and error-prone warnings in hbase-server (branch-2)
http://git-wip-us.apache.org/repos/asf/hbase/blob/b1269ec5/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedAction.java -- diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedAction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedAction.java index 5098e0b..410dd0c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedAction.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedAction.java @@ -101,7 +101,7 @@ public abstract class MultiThreadedAction { @Override public byte[] getDeterministicUniqueKey(long keyBase) { - return LoadTestKVGenerator.md5PrefixedKey(keyBase).getBytes(); + return Bytes.toBytes(LoadTestKVGenerator.md5PrefixedKey(keyBase)); } @Override @@ -114,7 +114,7 @@ public abstract class MultiThreadedAction { int numColumns = minColumnsPerKey + random.nextInt(maxColumnsPerKey - minColumnsPerKey + 1); byte[][] columns = new byte[numColumns][]; for (int i = 0; i < numColumns; ++i) { -columns[i] = Integer.toString(i).getBytes(); +columns[i] = Bytes.toBytes(Integer.toString(i)); } return columns; } http://git-wip-us.apache.org/repos/asf/hbase/blob/b1269ec5/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedReader.java -- diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedReader.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedReader.java index 447cca8..6864366 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedReader.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedReader.java @@ -201,8 +201,7 @@ public class MultiThreadedReader extends MultiThreadedAction "to read " + k + " is out of range (startKey=" + startKey + ", endKey=" + endKey + ")"); } - if (k % numThreads != readerId || - writer != null && writer.failedToWriteKey(k)) { + if (k % numThreads != readerId || (writer != null && 
writer.failedToWriteKey(k))) { // Skip keys that this thread should not read, as well as the keys // that we know the writer failed to write. continue; http://git-wip-us.apache.org/repos/asf/hbase/blob/b1269ec5/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestBoundedPriorityBlockingQueue.java -- diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestBoundedPriorityBlockingQueue.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestBoundedPriorityBlockingQueue.java index 7112d50..7746bea 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestBoundedPriorityBlockingQueue.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestBoundedPriorityBlockingQueue.java @@ -42,7 +42,7 @@ import org.junit.experimental.categories.Category; public class TestBoundedPriorityBlockingQueue { private final static int CAPACITY = 16; - class TestObject { + static class TestObject { private final int priority; private final int seqId; @@ -60,7 +60,7 @@ public class TestBoundedPriorityBlockingQueue { } } - class TestObjectComparator implements Comparator { + static class TestObjectComparator implements Comparator { public TestObjectComparator() {} @Override @@ -208,6 +208,7 @@ public class TestBoundedPriorityBlockingQueue { final CyclicBarrier threadsStarted = new CyclicBarrier(2); ExecutorService executor = Executors.newFixedThreadPool(2); executor.execute(new Runnable() { + @Override public void run() { try { assertNull(queue.poll(1000, TimeUnit.MILLISECONDS)); @@ -221,6 +222,7 @@ public class TestBoundedPriorityBlockingQueue { }); executor.execute(new Runnable() { + @Override public void run() { try { threadsStarted.await(); http://git-wip-us.apache.org/repos/asf/hbase/blob/b1269ec5/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestByteBuffUtils.java -- diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestByteBuffUtils.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestByteBuffUtils.java index 4c6990e..ecc6611 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestByteBuffUtils.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestByteBuffUtils.java @@ -36,7 +36,7 @@ public class TestByteBuffUtils { ByteBuffer bb2 = ByteBuffer.allocate(50); MultiByteBuff src = new MultiByteBuff(bb1,
[02/50] [abbrv] hbase git commit: HBASE-15321 - Ability to open a HRegion from hdfs snapshot.
HBASE-15321 - Ability to open a HRegion from hdfs snapshot. Project: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/0fa24ddd Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/0fa24ddd Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/0fa24ddd Branch: refs/heads/HBASE-19397-branch-2 Commit: 0fa24dddb994ff1538691436a16ceb95383327cd Parents: 6f8c312 Author: Rahul GidwaniAuthored: Mon Jan 22 12:13:13 2018 -0800 Committer: Rahul Gidwani Committed: Mon Jan 22 12:13:13 2018 -0800 -- .../hadoop/hbase/regionserver/HRegion.java | 27 - .../hbase/regionserver/HRegionFileSystem.java | 4 +- .../regionserver/TestHdfsSnapshotHRegion.java | 117 +++ 3 files changed, 145 insertions(+), 3 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/0fa24ddd/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java -- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java index aa9fa03..e18c80e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java @@ -20,7 +20,6 @@ package org.apache.hadoop.hbase.regionserver; import static org.apache.hadoop.hbase.HConstants.REPLICATION_SCOPE_LOCAL; import static org.apache.hadoop.hbase.regionserver.HStoreFile.MAJOR_COMPACTION_KEY; import static org.apache.hadoop.hbase.util.CollectionUtils.computeIfAbsent; - import java.io.EOFException; import java.io.FileNotFoundException; import java.io.IOException; @@ -89,6 +88,7 @@ import org.apache.hadoop.hbase.ExtendedCellBuilderFactory; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HConstants.OperationStatusCode; import org.apache.hadoop.hbase.HDFSBlocksDistribution; +import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.KeyValue; import 
org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.NamespaceDescriptor; @@ -7001,6 +7001,31 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi return this; } + /** + * Open a Region on a read-only file-system (like hdfs snapshots) + * @param conf The Configuration object to use. + * @param fs Filesystem to use + * @param info Info for region to be opened. + * @param htd the table descriptor + * @return new HRegion + * @throws IOException e + */ + public static HRegion openReadOnlyFileSystemHRegion(final Configuration conf, final FileSystem fs, + final Path tableDir, RegionInfo info, final TableDescriptor htd) throws IOException { +if (info == null) { + throw new NullPointerException("Passed region info is null"); +} +if (LOG.isDebugEnabled()) { + LOG.debug("Opening region (readOnly filesystem): " + info); +} +if (info.getReplicaId() <= 0) { + info = new HRegionInfo((HRegionInfo) info, 1); +} +HRegion r = HRegion.newHRegion(tableDir, null, fs, conf, info, htd, null); +r.writestate.setReadOnly(true); +return r.openHRegion(null); + } + public static void warmupHRegion(final RegionInfo info, final TableDescriptor htd, final WAL wal, final Configuration conf, final RegionServerServices rsServices, http://git-wip-us.apache.org/repos/asf/hbase/blob/0fa24ddd/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java -- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java index 11833a5..00dc0d0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java @@ -27,7 +27,6 @@ import java.util.Collection; import java.util.List; import java.util.Optional; import java.util.UUID; - import org.apache.hadoop.conf.Configuration; import 
org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; @@ -51,6 +50,7 @@ import org.apache.hadoop.hbase.util.FSHDFSUtils; import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil; +import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -76,7 +76,7
[37/50] [abbrv] hbase git commit: HBASE-19697 Remove TestReplicationAdminUsingProcedure
HBASE-19697 Remove TestReplicationAdminUsingProcedure Project: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/ca706683 Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/ca706683 Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/ca706683 Branch: refs/heads/HBASE-19397-branch-2 Commit: ca70668309f32262ba4c8edfce34ec10b2340a51 Parents: 424fa6c Author: zhangduoAuthored: Wed Jan 3 21:13:57 2018 +0800 Committer: zhangduo Committed: Tue Jan 23 18:19:45 2018 +0800 -- .../TestReplicationAdminUsingProcedure.java | 225 --- 1 file changed, 225 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/ca706683/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdminUsingProcedure.java -- diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdminUsingProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdminUsingProcedure.java deleted file mode 100644 index 1300376..000 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdminUsingProcedure.java +++ /dev/null @@ -1,225 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.client.replication; - -import java.io.IOException; -import org.apache.hadoop.hbase.client.Put; -import org.apache.hadoop.hbase.client.Result; -import org.apache.hadoop.hbase.client.ResultScanner; -import org.apache.hadoop.hbase.client.Scan; -import org.apache.hadoop.hbase.replication.ReplicationPeerConfig; -import org.apache.hadoop.hbase.replication.TestReplicationBase; -import org.apache.hadoop.hbase.testclassification.ClientTests; -import org.apache.hadoop.hbase.testclassification.MediumTests; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.log4j.Logger; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.Test; -import org.junit.experimental.categories.Category; - -import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList; -import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableMap; - -@Category({ MediumTests.class, ClientTests.class }) -public class TestReplicationAdminUsingProcedure extends TestReplicationBase { - - private static final String PEER_ID = "2"; - private static final Logger LOG = Logger.getLogger(TestReplicationAdminUsingProcedure.class); - - @BeforeClass - public static void setUpBeforeClass() throws Exception { -conf1.setInt("hbase.multihconnection.threads.max", 10); - -// Start the master & slave mini cluster. 
-TestReplicationBase.setUpBeforeClass(); - -// Remove the replication peer -hbaseAdmin.removeReplicationPeer(PEER_ID); - } - - private void loadData(int startRowKey, int endRowKey) throws IOException { -for (int i = startRowKey; i < endRowKey; i++) { - byte[] rowKey = Bytes.add(row, Bytes.toBytes(i)); - Put put = new Put(rowKey); - put.addColumn(famName, null, Bytes.toBytes(i)); - htable1.put(put); -} - } - - private void waitForReplication(int expectedRows, int retries) - throws IOException, InterruptedException { -Scan scan; -for (int i = 0; i < retries; i++) { - scan = new Scan(); - if (i == retries - 1) { -throw new IOException("Waited too much time for normal batch replication"); - } - try (ResultScanner scanner = htable2.getScanner(scan)) { -int count = 0; -for (Result res : scanner) { - count++; -} -if (count != expectedRows) { - LOG.info("Only got " + count + " rows, expected rows: " + expectedRows); - Thread.sleep(SLEEP_TIME); -} else { - return; -} - } -} - } - - @Before - public void setUp() throws IOException { -
[33/50] [abbrv] hbase git commit: HBASE-19635 Introduce a thread at RS side to call reportProcedureDone
HBASE-19635 Introduce a thread at RS side to call reportProcedureDone Project: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/52458290 Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/52458290 Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/52458290 Branch: refs/heads/HBASE-19397-branch-2 Commit: 52458290ed4f483c5737d86a8dc8a3c9cfbaf59e Parents: b3b92ef Author: zhangduoAuthored: Wed Dec 27 20:13:42 2017 +0800 Committer: zhangduo Committed: Tue Jan 23 18:19:04 2018 +0800 -- .../src/main/protobuf/RegionServerStatus.proto | 5 +- .../hadoop/hbase/master/MasterRpcServices.java | 15 ++- .../hbase/regionserver/HRegionServer.java | 72 .../RemoteProcedureResultReporter.java | 111 +++ .../handler/RSProcedureHandler.java | 2 +- 5 files changed, 149 insertions(+), 56 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/52458290/hbase-protocol-shaded/src/main/protobuf/RegionServerStatus.proto -- diff --git a/hbase-protocol-shaded/src/main/protobuf/RegionServerStatus.proto b/hbase-protocol-shaded/src/main/protobuf/RegionServerStatus.proto index 4f75941..3f836cd 100644 --- a/hbase-protocol-shaded/src/main/protobuf/RegionServerStatus.proto +++ b/hbase-protocol-shaded/src/main/protobuf/RegionServerStatus.proto @@ -146,7 +146,7 @@ message RegionSpaceUseReportRequest { message RegionSpaceUseReportResponse { } -message ReportProcedureDoneRequest { +message RemoteProcedureResult { required uint64 proc_id = 1; enum Status { SUCCESS = 1; @@ -155,6 +155,9 @@ message ReportProcedureDoneRequest { required Status status = 2; optional ForeignExceptionMessage error = 3; } +message ReportProcedureDoneRequest { + repeated RemoteProcedureResult result = 1; +} message ReportProcedureDoneResponse { } http://git-wip-us.apache.org/repos/asf/hbase/blob/52458290/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java -- diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java index 72bf2d1..377a9c6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java @@ -265,6 +265,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProto import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse; import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RemoteProcedureResult; import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportProcedureDoneRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportProcedureDoneResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest; @@ -2254,12 +2255,14 @@ public class MasterRpcServices extends RSRpcServices @Override public ReportProcedureDoneResponse reportProcedureDone(RpcController controller, ReportProcedureDoneRequest request) throws ServiceException { -if (request.getStatus() == ReportProcedureDoneRequest.Status.SUCCESS) { - master.remoteProcedureCompleted(request.getProcId()); -} else { - master.remoteProcedureFailed(request.getProcId(), -RemoteProcedureException.fromProto(request.getError())); -} +request.getResultList().forEach(result -> { + if (result.getStatus() == RemoteProcedureResult.Status.SUCCESS) { +master.remoteProcedureCompleted(result.getProcId()); + } else { +master.remoteProcedureFailed(result.getProcId(), + RemoteProcedureException.fromProto(result.getError())); + } 
+}); return ReportProcedureDoneResponse.getDefaultInstance(); } } http://git-wip-us.apache.org/repos/asf/hbase/blob/52458290/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java -- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java index 3c8ec17..3844415 100644 ---
[48/50] [abbrv] hbase git commit: HBASE-19707 Race in start and terminate of a replication source after we async start replication endpoint
HBASE-19707 Race in start and terminate of a replication source after we async start replicatione endpoint Project: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/b0e88ed1 Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/b0e88ed1 Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/b0e88ed1 Branch: refs/heads/HBASE-19397-branch-2 Commit: b0e88ed14aa6b40a1d26d9d4cd9175c769dc97ed Parents: aab18b4 Author: zhangduoAuthored: Fri Jan 5 18:28:44 2018 +0800 Committer: zhangduo Committed: Tue Jan 23 18:20:38 2018 +0800 -- .../RecoveredReplicationSource.java | 16 +- .../regionserver/ReplicationSource.java | 203 ++- 2 files changed, 116 insertions(+), 103 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/b0e88ed1/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSource.java -- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSource.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSource.java index 1be9a88..3cae0f2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSource.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSource.java @@ -68,7 +68,7 @@ public class RecoveredReplicationSource extends ReplicationSource { LOG.debug("Someone has beat us to start a worker thread for wal group " + walGroupId); } else { LOG.debug("Starting up worker for wal group " + walGroupId); - worker.startup(getUncaughtExceptionHandler()); + worker.startup(this::uncaughtException); worker.setWALReader( startNewWALReader(worker.getName(), walGroupId, queue, worker.getStartPosition())); workerThreads.put(walGroupId, worker); @@ -76,13 +76,13 @@ public class RecoveredReplicationSource extends ReplicationSource { } @Override - protected 
ReplicationSourceWALReader startNewWALReader(String threadName, - String walGroupId, PriorityBlockingQueue queue, long startPosition) { -ReplicationSourceWALReader walReader = new RecoveredReplicationSourceWALReader(fs, -conf, queue, startPosition, walEntryFilter, this); -Threads.setDaemonThreadRunning(walReader, threadName -+ ".replicationSource.replicationWALReaderThread." + walGroupId + "," + queueId, - getUncaughtExceptionHandler()); + protected ReplicationSourceWALReader startNewWALReader(String threadName, String walGroupId, + PriorityBlockingQueue queue, long startPosition) { +ReplicationSourceWALReader walReader = + new RecoveredReplicationSourceWALReader(fs, conf, queue, startPosition, walEntryFilter, this); +Threads.setDaemonThreadRunning(walReader, + threadName + ".replicationSource.replicationWALReaderThread." + walGroupId + "," + queueId, + this::uncaughtException); return walReader; } http://git-wip-us.apache.org/repos/asf/hbase/blob/b0e88ed1/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java -- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java index 0092251..09b6cc1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java @@ -75,7 +75,7 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Lists; * */ @InterfaceAudience.Private -public class ReplicationSource extends Thread implements ReplicationSourceInterface { +public class ReplicationSource implements ReplicationSourceInterface { private static final Logger LOG = LoggerFactory.getLogger(ReplicationSource.class); // Queues of logs to process, entry in format of walGroupId->queue, @@ -114,10 +114,8 @@ public class 
ReplicationSource extends Thread implements ReplicationSourceInterf private MetricsSource metrics; // WARN threshold for the number of queued logs, defaults to 2 private int logQueueWarnThreshold; - // whether the replication endpoint has been initialized - private volatile boolean endpointInitialized = false; // ReplicationEndpoint which will handle the actual replication - private ReplicationEndpoint replicationEndpoint; + private volatile
[50/50] [abbrv] hbase git commit: HBASE-19748 TestRegionReplicaFailover and TestRegionReplicaReplicationEndpoint UT hangs
HBASE-19748 TestRegionReplicaFailover and TestRegionReplicaReplicationEndpoint UT hangs Project: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/21986839 Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/21986839 Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/21986839 Branch: refs/heads/HBASE-19397-branch-2 Commit: 21986839374c65f88900a09b88ca434c93ba1e6f Parents: 2aff1ed Author: huzhengAuthored: Wed Jan 10 15:00:30 2018 +0800 Committer: zhangduo Committed: Tue Jan 23 18:20:38 2018 +0800 -- .../main/java/org/apache/hadoop/hbase/master/HMaster.java | 9 - 1 file changed, 4 insertions(+), 5 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/21986839/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java -- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java index a7ef275..c7e67ac 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -38,7 +38,6 @@ import java.util.List; import java.util.Map; import java.util.Map.Entry; import java.util.Objects; -import java.util.Optional; import java.util.Set; import java.util.concurrent.ExecutionException; import java.util.concurrent.Future; @@ -70,6 +69,7 @@ import org.apache.hadoop.hbase.MasterNotRunningException; import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.NamespaceDescriptor; import org.apache.hadoop.hbase.PleaseHoldException; +import org.apache.hadoop.hbase.ReplicationPeerNotFoundException; import org.apache.hadoop.hbase.ServerLoad; import org.apache.hadoop.hbase.ServerMetricsBuilder; import org.apache.hadoop.hbase.ServerName; @@ -3378,13 +3378,12 @@ public class HMaster extends HRegionServer implements MasterServices { cpHost.preGetReplicationPeerConfig(peerId); } 
LOG.info(getClientIdAuditPrefix() + " get replication peer config, id=" + peerId); -Optional peerConfig = - this.replicationPeerManager.getPeerConfig(peerId); - +ReplicationPeerConfig peerConfig = this.replicationPeerManager.getPeerConfig(peerId) +.orElseThrow(() -> new ReplicationPeerNotFoundException(peerId)); if (cpHost != null) { cpHost.postGetReplicationPeerConfig(peerId); } -return peerConfig.orElse(null); +return peerConfig; } @Override
[19/50] [abbrv] hbase git commit: HBASE-19580 Use slf4j instead of commons-logging in new, just-added Peer Procedure classes
HBASE-19580 Use slf4j instead of commons-logging in new, just-added Peer Procedure classes Project: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/bcb79d53 Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/bcb79d53 Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/bcb79d53 Branch: refs/heads/HBASE-19397-branch-2 Commit: bcb79d53c38859dc1bd8772eb989cde1401fd711 Parents: 211e6f5 Author: zhangduoAuthored: Thu Dec 21 21:59:46 2017 +0800 Committer: zhangduo Committed: Tue Jan 23 18:18:00 2018 +0800 -- .../hadoop/hbase/master/replication/AddPeerProcedure.java | 6 +++--- .../hadoop/hbase/master/replication/DisablePeerProcedure.java | 6 +++--- .../hadoop/hbase/master/replication/EnablePeerProcedure.java | 6 +++--- .../hadoop/hbase/master/replication/ModifyPeerProcedure.java | 6 +++--- .../hadoop/hbase/master/replication/RefreshPeerProcedure.java | 6 +++--- .../hadoop/hbase/master/replication/RemovePeerProcedure.java | 6 +++--- .../hbase/master/replication/UpdatePeerConfigProcedure.java| 6 +++--- 7 files changed, 21 insertions(+), 21 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/bcb79d53/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/AddPeerProcedure.java -- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/AddPeerProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/AddPeerProcedure.java index c3862d8..066c3e7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/AddPeerProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/AddPeerProcedure.java @@ -19,8 +19,6 @@ package org.apache.hadoop.hbase.master.replication; import java.io.IOException; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.client.replication.ReplicationPeerConfigUtil; import 
org.apache.hadoop.hbase.master.MasterCoprocessorHost; import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; @@ -28,6 +26,8 @@ import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer; import org.apache.hadoop.hbase.replication.ReplicationException; import org.apache.hadoop.hbase.replication.ReplicationPeerConfig; import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.AddPeerStateData; @@ -37,7 +37,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.A @InterfaceAudience.Private public class AddPeerProcedure extends ModifyPeerProcedure { - private static final Log LOG = LogFactory.getLog(AddPeerProcedure.class); + private static final Logger LOG = LoggerFactory.getLogger(AddPeerProcedure.class); private ReplicationPeerConfig peerConfig; http://git-wip-us.apache.org/repos/asf/hbase/blob/bcb79d53/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/DisablePeerProcedure.java -- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/DisablePeerProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/DisablePeerProcedure.java index 0b32db9..9a28de6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/DisablePeerProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/DisablePeerProcedure.java @@ -19,11 +19,11 @@ package org.apache.hadoop.hbase.master.replication; import java.io.IOException; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.master.MasterCoprocessorHost; import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * The procedure for disabling a 
replication peer. @@ -31,7 +31,7 @@ import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private public class DisablePeerProcedure extends ModifyPeerProcedure { - private static final Log LOG = LogFactory.getLog(DisablePeerProcedure.class); + private static final Logger LOG = LoggerFactory.getLogger(DisablePeerProcedure.class); public DisablePeerProcedure() { } http://git-wip-us.apache.org/repos/asf/hbase/blob/bcb79d53/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/EnablePeerProcedure.java
[06/50] [abbrv] hbase git commit: HBASE-19811 Fix findbugs and error-prone warnings in hbase-server (branch-2)
http://git-wip-us.apache.org/repos/asf/hbase/blob/b1269ec5/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicas.java -- diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicas.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicas.java index a497bf4..59a0c31 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicas.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicas.java @@ -71,7 +71,7 @@ public class TestRegionReplicas { private static final int NB_SERVERS = 1; private static Table table; - private static final byte[] row = "TestRegionReplicas".getBytes(); + private static final byte[] row = Bytes.toBytes("TestRegionReplicas"); private static HRegionInfo hriPrimary; private static HRegionInfo hriSecondary; http://git-wip-us.apache.org/repos/asf/hbase/blob/b1269ec5/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicasWithModifyTable.java -- diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicasWithModifyTable.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicasWithModifyTable.java index d1bf773..9a02a9d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicasWithModifyTable.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicasWithModifyTable.java @@ -111,7 +111,7 @@ public class TestRegionReplicasWithModifyTable { List onlineRegions2 = getSecondaryRS().getRegions(tableName); List onlineRegions3 = getTertiaryRS().getRegions(tableName); int totalRegions = onlineRegions.size() + onlineRegions2.size() + onlineRegions3.size(); - assertEquals("the number of regions should be more than 1", totalRegions, 3); + assertEquals("the number of regions should be more than 1", 3, totalRegions); } finally { 
disableAndDeleteTable(tableName); } @@ -132,7 +132,7 @@ public class TestRegionReplicasWithModifyTable { List onlineRegions2 = getSecondaryRS().getRegions(tableName); List onlineRegions3 = getTertiaryRS().getRegions(tableName); int totalRegions = onlineRegions.size() + onlineRegions2.size() + onlineRegions3.size(); - assertEquals("the number of regions should be equal to 30", totalRegions, 30); + assertEquals("the number of regions should be equal to 30", 30, totalRegions); } finally { disableAndDeleteTable(tableName); } @@ -148,7 +148,7 @@ public class TestRegionReplicasWithModifyTable { List onlineRegions2 = getSecondaryRS().getRegions(tableName); List onlineRegions3 = getTertiaryRS().getRegions(tableName); int totalRegions = onlineRegions.size() + onlineRegions2.size() + onlineRegions3.size(); - assertEquals("the number of regions should be 3", totalRegions, 3); + assertEquals("the number of regions should be 3", 3, totalRegions); } finally { disableAndDeleteTable(tableName); } @@ -164,7 +164,7 @@ public class TestRegionReplicasWithModifyTable { List onlineRegions2 = getSecondaryRS().getRegions(tableName); List onlineRegions3 = getTertiaryRS().getRegions(tableName); int totalRegions = onlineRegions.size() + onlineRegions2.size() + onlineRegions3.size(); - assertEquals("the number of regions should be reduced to 2", totalRegions, 2); + assertEquals("the number of regions should be reduced to 2", 2, totalRegions); } finally { disableAndDeleteTable(tableName); } @@ -181,7 +181,7 @@ public class TestRegionReplicasWithModifyTable { List onlineRegions2 = getSecondaryRS().getRegions(tableName); List onlineRegions3 = getTertiaryRS().getRegions(tableName); int totalRegions = onlineRegions.size() + onlineRegions2.size() + onlineRegions3.size(); - assertEquals("the number of regions should be reduced to 40", totalRegions, 40); + assertEquals("the number of regions should be reduced to 40", 40, totalRegions); } finally { disableAndDeleteTable(tableName); } @@ -198,7 +198,7 
@@ public class TestRegionReplicasWithModifyTable { List onlineRegions2 = getSecondaryRS().getRegions(tableName); List onlineRegions3 = getTertiaryRS().getRegions(tableName); int totalRegions = onlineRegions.size() + onlineRegions2.size() + onlineRegions3.size(); - assertEquals("the number of regions should be equal to 45", totalRegions, 3 * 15); + assertEquals("the number of regions should be equal to 45", 3 * 15, totalRegions); } finally { disableAndDeleteTable(tableName); }
[45/50] [abbrv] hbase git commit: HBASE-19661 Replace ReplicationStateZKBase with ZKReplicationStorageBase
HBASE-19661 Replace ReplicationStateZKBase with ZKReplicationStorageBase Project: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/424fa6cf Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/424fa6cf Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/424fa6cf Branch: refs/heads/HBASE-19397-branch-2 Commit: 424fa6cf29c9df9b80d2c5e1a5dd3ac197d6240f Parents: ba414a7 Author: huzheng Authored: Fri Dec 29 15:55:28 2017 +0800 Committer: zhangduo Committed: Tue Jan 23 18:19:45 2018 +0800 -- .../hbase/replication/ReplicationFactory.java | 5 +- .../replication/ReplicationStateZKBase.java | 153 --- .../replication/ReplicationTrackerZKImpl.java | 21 +-- .../replication/ZKReplicationPeerStorage.java | 24 ++- .../replication/ZKReplicationStorageBase.java | 13 +- .../org/apache/hadoop/hbase/master/HMaster.java | 4 +- .../master/ReplicationPeerConfigUpgrader.java | 128 .../regionserver/DumpReplicationQueues.java | 18 +-- .../replication/regionserver/Replication.java | 3 +- .../org/apache/hadoop/hbase/util/HBaseFsck.java | 3 +- .../TestReplicationTrackerZKImpl.java | 3 +- .../replication/master/TestTableCFsUpdater.java | 41 ++--- .../TestReplicationSourceManager.java | 6 +- 13 files changed, 136 insertions(+), 286 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/424fa6cf/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java -- diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java index 6c66aff..2a970ba 100644 --- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java +++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java @@ -33,9 +33,8 @@ public class ReplicationFactory { return new ReplicationPeers(zk, conf); } - public static 
ReplicationTracker getReplicationTracker(ZKWatcher zookeeper, - final ReplicationPeers replicationPeers, Configuration conf, Abortable abortable, + public static ReplicationTracker getReplicationTracker(ZKWatcher zookeeper, Abortable abortable, Stoppable stopper) { -return new ReplicationTrackerZKImpl(zookeeper, replicationPeers, conf, abortable, stopper); +return new ReplicationTrackerZKImpl(zookeeper, abortable, stopper); } } http://git-wip-us.apache.org/repos/asf/hbase/blob/424fa6cf/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationStateZKBase.java -- diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationStateZKBase.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationStateZKBase.java deleted file mode 100644 index f49537c..000 --- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationStateZKBase.java +++ /dev/null @@ -1,153 +0,0 @@ -/* - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.hbase.replication; - -import java.io.ByteArrayOutputStream; -import java.io.IOException; -import java.util.List; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.Abortable; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; -import org.apache.hbase.thirdparty.com.google.protobuf.CodedOutputStream; -import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos; -import org.apache.hadoop.hbase.zookeeper.ZKConfig; -import org.apache.hadoop.hbase.zookeeper.ZKUtil; -import org.apache.hadoop.hbase.zookeeper.ZKWatcher; -import