HBASE-12796 Clean up HTable and HBaseAdmin deprecated constructor usage (Jurriaan Mous)
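For reference, the shape of the cleanup applied across these files: deprecated
new HTable(conf, tableName) and new HBaseAdmin(conf) constructions give way to
an explicitly managed Connection. A minimal before/after sketch (the wrapper
class, main method, and the table name "t1" are illustrative, not part of the
patch):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Table;

    public class ConnectionFactoryMigration {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Old style, removed by this patch (each object managed its own
        // implicit connection):
        //   Table table = new HTable(conf, TableName.valueOf("t1"));
        //   Admin admin = new HBaseAdmin(conf);
        // New style: the caller owns one Connection, obtains lightweight
        // Table/Admin instances from it, and closes everything when done.
        try (Connection connection = ConnectionFactory.createConnection(conf);
            Table table = connection.getTable(TableName.valueOf("t1"));
            Admin admin = connection.getAdmin()) {
          System.out.println("t1 exists: " + admin.tableExists(table.getName()));
        }
      }
    }

In the tests the same pattern appears as TEST_UTIL.getConnection().getTable(...),
which reuses the connection owned by HBaseTestingUtility instead of opening a
new one per table handle.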
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/9246af8d
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/9246af8d
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/9246af8d

Branch: refs/heads/master
Commit: 9246af8dac5e80be92ba25ff7ee8b736f9e78924
Parents: 645fbd7
Author: tedyu <[email protected]>
Authored: Thu Jan 8 07:49:28 2015 -0800
Committer: tedyu <[email protected]>
Committed: Thu Jan 8 07:49:28 2015 -0800

----------------------------------------------------------------------
 .../apache/hadoop/hbase/MetaTableAccessor.java  |   2 +-
 .../client/replication/ReplicationAdmin.java    |  15 +-
 .../hbase/client/TestClientNoCluster.java       |  16 +-
 .../example/TestBulkDeleteProtocol.java         |   6 +-
 .../example/TestRowCountEndpoint.java           |   2 +-
 .../TestZooKeeperScanPolicyObserver.java        |   2 +-
 .../hbase/IntegrationTestLazyCfLoading.java     |   6 +-
 .../actions/RestartRsHoldingTableAction.java    |  29 ++--
 .../mapreduce/IntegrationTestImportTsv.java     |   2 +-
 .../hadoop/hbase/mttr/IntegrationTestMTTR.java  |   9 +-
 .../test/IntegrationTestBigLinkedList.java      |  59 ++++----
 ...egrationTestBigLinkedListWithVisibility.java |   6 +-
 .../test/IntegrationTestLoadAndVerify.java      |  12 +-
 .../trace/IntegrationTestSendTraceRequests.java |   6 +-
 .../hadoop/hbase/rest/TestGzipFilter.java       |   2 +-
 .../hbase/rest/TestScannersWithFilters.java     |   2 +-
 .../hbase/rest/TestScannersWithLabels.java      |  35 ++---
 .../hadoop/hbase/rest/TestTableResource.java    |   2 +-
 .../hbase/rest/client/TestRemoteTable.java      |   4 +-
 .../apache/hadoop/hbase/LocalHBaseCluster.java  |   7 +-
 .../hbase/coprocessor/CoprocessorHost.java      |   5 +-
 .../hadoop/hbase/mapred/TableInputFormat.java   |   7 +-
 .../hbase/mapred/TableInputFormatBase.java      |   3 +-
 .../hadoop/hbase/mapreduce/CopyTable.java       |  12 +-
 .../hbase/mapreduce/HFileOutputFormat2.java     |  19 ++-
 .../apache/hadoop/hbase/mapreduce/Import.java   |  12 +-
 .../hadoop/hbase/mapreduce/ImportTsv.java       |   4 +-
 .../hbase/mapreduce/LoadIncrementalHFiles.java  |   4 +-
 .../hbase/mapreduce/MultiTableOutputFormat.java |  26 +++-
 .../hadoop/hbase/mapreduce/WALPlayer.java       |   2 +-
 .../replication/VerifyReplication.java          |  30 +++-
 .../org/apache/hadoop/hbase/util/HMerge.java    |   6 +-
 .../resources/hbase-webapps/master/table.jsp    |   6 +-
 .../hadoop/hbase/HBaseTestingUtility.java       | 103 +++++++------
 .../hadoop/hbase/PerformanceEvaluation.java     |   7 +-
 .../hadoop/hbase/ScanPerformanceEvaluation.java |   6 +-
 .../apache/hadoop/hbase/TestAcidGuarantees.java |  11 +-
 .../apache/hadoop/hbase/TestInfoServers.java    |   6 +-
 .../apache/hadoop/hbase/TestMultiVersions.java  |  45 +++---
 .../org/apache/hadoop/hbase/TestNamespace.java  |   2 +-
 .../org/apache/hadoop/hbase/TestZooKeeper.java  |  26 ++--
 .../apache/hadoop/hbase/client/TestAdmin1.java  |  34 ++---
 .../apache/hadoop/hbase/client/TestAdmin2.java  |  18 +--
 .../client/TestClientOperationInterrupt.java    |   7 +-
 .../hadoop/hbase/client/TestClientTimeouts.java |  22 +--
 .../client/TestCloneSnapshotFromClient.java     |   2 +-
 .../hadoop/hbase/client/TestFromClientSide.java |  32 ++--
 .../hbase/client/TestFromClientSide3.java       |   4 +-
 .../org/apache/hadoop/hbase/client/TestHCM.java |  29 ++--
 .../hadoop/hbase/client/TestMultiParallel.java  |  16 +-
 .../hbase/client/TestReplicaWithCluster.java    |   6 +-
 .../client/TestRestoreSnapshotFromClient.java   |   4 +-
 .../hbase/client/TestRpcControllerFactory.java  |   4 +-
 .../hadoop/hbase/client/TestScannerTimeout.java |  16 +-
 .../client/TestSnapshotCloneIndependence.java   |   8 +-
 .../hbase/client/TestSnapshotFromClient.java    |   4 +-
 .../hbase/client/TestSnapshotMetadata.java      |   8 +-
 .../hbase/client/TestTableSnapshotScanner.java  |   2 +-
 .../hadoop/hbase/constraint/TestConstraint.java |  10 +-
 .../TestBatchCoprocessorEndpoint.java           |  13 +-
 .../coprocessor/TestCoprocessorEndpoint.java    |  12 +-
 .../coprocessor/TestOpenTableInCoprocessor.java |  25 ++-
 .../coprocessor/TestRegionObserverBypass.java   |   4 +-
 .../TestRegionObserverInterface.java            |   6 +-
 .../TestRegionObserverScannerOpenHook.java      |   2 +-
 .../TestRegionServerCoprocessorEndpoint.java    |   2 +-
 .../coprocessor/TestRegionServerObserver.java   |   2 +-
 .../hbase/filter/FilterTestingCluster.java      |  28 ++--
 .../hbase/filter/TestFilterWithScanLimits.java  |   3 +-
 .../hadoop/hbase/filter/TestFilterWrapper.java  |  12 +-
 .../hadoop/hbase/filter/TestScanRowPrefix.java  |   3 +-
 .../hbase/io/encoding/TestChangingEncoding.java |  29 ++--
 .../encoding/TestLoadAndSwitchEncodeOnDisk.java |   4 +-
 .../hadoop/hbase/mapred/TestTableMapReduce.java |   3 +-
 .../TableSnapshotInputFormatTestBase.java       |   3 +-
 .../hbase/mapreduce/TestHFileOutputFormat.java  |  20 +--
 .../hbase/mapreduce/TestImportExport.java       |  25 +--
 .../TestImportTSVWithOperationAttributes.java   |   7 +-
 .../TestImportTSVWithVisibilityLabels.java      |   5 +-
 .../hadoop/hbase/mapreduce/TestImportTsv.java   |   6 +-
 .../mapreduce/TestLoadIncrementalHFiles.java    |   2 +-
 .../TestLoadIncrementalHFilesSplitRecovery.java |   2 +-
 .../mapreduce/TestMultithreadedTableMapper.java |  13 +-
 .../hbase/mapreduce/TestTableMapReduce.java     |   9 +-
 .../hbase/mapreduce/TestTableMapReduceBase.java |   8 +-
 .../hbase/mapreduce/TestTimeRangeMapRed.java    |  44 +++---
 .../hbase/master/TestAssignmentListener.java    |   2 +-
 .../master/TestAssignmentManagerOnCluster.java  |  26 ++--
 .../TestMasterOperationsForRegionReplicas.java  |   2 +-
 .../TestMasterRestartAfterDisablingTable.java   |   2 +-
 .../hbase/master/TestMasterTransitions.java     |   7 +-
 .../hadoop/hbase/master/TestRestartCluster.java |  10 +-
 .../master/cleaner/TestSnapshotFromMaster.java  |   4 +-
 .../handler/TestTableDeleteFamilyHandler.java   |   3 +-
 .../hbase/regionserver/TestCompactionState.java |  12 +-
 .../regionserver/TestEncryptionKeyRotation.java |   2 +-
 .../TestEncryptionRandomKeying.java             |   2 +-
 .../TestEndToEndSplitTransaction.java           |  11 +-
 .../hbase/regionserver/TestFSErrorsExposed.java |   5 +-
 .../regionserver/TestHRegionOnCluster.java      |   2 +-
 .../regionserver/TestHRegionServerBulkLoad.java |   5 +-
 .../hbase/regionserver/TestJoinedScanners.java  |   2 +-
 .../hbase/regionserver/TestRegionReplicas.java  |   2 +-
 .../regionserver/TestRegionServerMetrics.java   |  26 +---
 .../regionserver/TestSCVFWithMiniCluster.java   |   2 +-
 .../regionserver/TestScannerWithBulkload.java   |  23 +--
 .../regionserver/TestServerCustomProtocol.java  |  14 +-
 .../TestSplitTransactionOnCluster.java          |  14 +-
 .../hadoop/hbase/regionserver/TestTags.java     |   8 +-
 .../regionserver/wal/TestLogRollAbort.java      |   6 +-
 .../regionserver/wal/TestLogRollPeriod.java     |   4 +-
 .../hbase/regionserver/wal/TestLogRolling.java  |  12 +-
 .../hbase/regionserver/wal/TestWALReplay.java   |   2 +-
 .../replication/TestMultiSlaveReplication.java  |  12 +-
 ...estReplicationChangingPeerRegionservers.java |   4 +-
 .../replication/TestReplicationSmallTests.java  |   6 +-
 .../replication/TestReplicationSyncUpTool.java  |  37 +++--
 .../replication/TestReplicationWithTags.java    |   6 +-
 .../access/TestAccessControlFilter.java         |  14 +-
 .../security/access/TestAccessController.java   |  86 ++++++-----
 .../security/access/TestAccessController2.java  |   5 +-
 .../access/TestCellACLWithMultipleVersions.java |  16 +-
 .../hbase/security/access/TestCellACLs.java     |  59 +++++---
 .../security/access/TestNamespaceCommands.java  |  14 +-
 .../access/TestScanEarlyTermination.java        |  22 ++-
 .../security/access/TestTablePermissions.java   |   2 +-
 .../ExpAsStringVisibilityLabelServiceImpl.java  |   9 +-
 .../TestDefaultScanLabelGeneratorStack.java     |   6 +-
 .../TestEnforcingScanLabelGenerator.java        |   4 +-
 ...sibilityLabelReplicationWithExpAsString.java |  14 +-
 .../visibility/TestVisibilityLabels.java        |  17 ++-
 .../TestVisibilityLabelsReplication.java        |  27 ++--
 .../visibility/TestVisibilityLabelsWithACL.java |  14 +-
 ...ibilityLabelsWithDefaultVisLabelService.java |   2 +-
 .../TestVisibilityLabelsWithDeletes.java        | 151 +++++++++----------
 .../TestVisibilityWithCheckAuths.java           |  23 ++-
 .../hbase/snapshot/SnapshotTestingUtils.java    |   6 +-
 .../hbase/snapshot/TestExportSnapshot.java      |   1 -
 .../snapshot/TestFlushSnapshotFromClient.java   |   7 +-
 .../TestRestoreFlushSnapshotFromClient.java     |   3 +-
 .../hbase/util/MultiThreadedReaderWithACL.java  |   2 +-
 .../hbase/util/MultiThreadedUpdaterWithACL.java |   4 +-
 .../hbase/util/MultiThreadedWriterWithACL.java  |   2 +-
 .../util/ProcessBasedLocalHBaseCluster.java     |   2 +-
 .../hadoop/hbase/util/RestartMetaTest.java      |  10 +-
 .../hbase/util/TestCoprocessorScanPolicy.java   |   2 +-
 .../hbase/util/TestHBaseFsckEncryption.java     |   2 +-
 .../hadoop/hbase/util/TestMergeTable.java       |   6 +-
 .../util/TestMiniClusterLoadSequential.java     |   2 +-
 .../hbase/util/TestProcessBasedCluster.java     |   2 +-
 .../hadoop/hbase/util/TestRegionSplitter.java   |   3 +-
 .../util/hbck/OfflineMetaRebuildTestCore.java   |  10 +-
 .../apache/hadoop/hbase/thrift2/HTablePool.java |  20 ++-
 .../hadoop/hbase/thrift2/TestHTablePool.java    |   4 +-
 .../thrift2/TestThriftHBaseServiceHandler.java  |   2 +-
 ...TestThriftHBaseServiceHandlerWithLabels.java |   2 +-
 156 files changed, 1087 insertions(+), 849 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/9246af8d/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
index 5abf6a4..108662c 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
@@ -190,7 +190,7 @@ public class MetaTableAccessor {
     // There should still be a way to use this method with an unmanaged connection.
     if (connection instanceof ClusterConnection) {
       if (((ClusterConnection) connection).isManaged()) {
-        return new HTable(TableName.META_TABLE_NAME, (ClusterConnection) connection);
+        return new HTable(TableName.META_TABLE_NAME, connection);
       }
     }
     return connection.getTable(TableName.META_TABLE_NAME);

http://git-wip-us.apache.org/repos/asf/hbase/blob/9246af8d/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java
index 3db8c1c..ea7dc60 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java
@@ -41,9 +41,8 @@ import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.client.HConnection;
-import org.apache.hadoop.hbase.client.HConnectionManager;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.replication.ReplicationException;
 import org.apache.hadoop.hbase.replication.ReplicationFactory;
 import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
@@ -91,7 +90,7 @@ public class ReplicationAdmin implements Closeable {
   public static final String REPLICATIONGLOBAL = Integer
       .toString(HConstants.REPLICATION_SCOPE_GLOBAL);
 
-  private final HConnection connection;
+  private final Connection connection;
   // TODO: replication should be managed by master. All the classes except ReplicationAdmin should
   // be moved to hbase-server. Resolve it in HBASE-11392.
   private final ReplicationQueuesClient replicationQueuesClient;
@@ -109,7 +108,7 @@
       throw new RuntimeException("hbase.replication isn't true, please " +
           "enable it in order to use replication");
     }
-    this.connection = HConnectionManager.getConnection(conf);
+    this.connection = ConnectionFactory.createConnection(conf);
     ZooKeeperWatcher zkw = createZooKeeperWatcher();
     try {
       this.replicationPeers = ReplicationFactory.getReplicationPeers(zkw, conf, this.connection);
@@ -323,7 +322,7 @@
    * Append the replicable table-cf config of the specified peer
    * @param id a short that identifies the cluster
    * @param tableCfs table-cfs config str
-   * @throws KeeperException
+   * @throws ReplicationException
    */
   public void appendPeerTableCFs(String id, String tableCfs) throws ReplicationException {
     appendPeerTableCFs(id, parseTableCFsFromConfig(tableCfs));
@@ -333,7 +332,7 @@
    * Append the replicable table-cf config of the specified peer
    * @param id a short that identifies the cluster
    * @param tableCfs A map from tableName to column family names
-   * @throws KeeperException
+   * @throws ReplicationException
    */
   public void appendPeerTableCFs(String id, Map<TableName, ? extends Collection<String>> tableCfs)
       throws ReplicationException {
@@ -469,7 +468,7 @@
   public List<HashMap<String, String>> listReplicated() throws IOException {
     List<HashMap<String, String>> replicationColFams = new ArrayList<HashMap<String, String>>();
 
-    Admin admin = new HBaseAdmin(this.connection.getConfiguration());
+    Admin admin = connection.getAdmin();
 
     HTableDescriptor[] tables;
     try {
       tables = admin.listTables();

http://git-wip-us.apache.org/repos/asf/hbase/blob/9246af8d/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java
index da643fc..43e0b75 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java
@@ -145,7 +145,8 @@ public class TestClientNoCluster extends Configured implements Tool {
     Configuration localConfig = HBaseConfiguration.create(this.conf);
     // This override mocks up our exists/get call to throw a RegionServerStoppedException.
     localConfig.set("hbase.client.connection.impl", RpcTimeoutConnection.class.getName());
-    Table table = new HTable(localConfig, TableName.META_TABLE_NAME);
+    Connection connection = ConnectionFactory.createConnection(localConfig);
+    Table table = connection.getTable(TableName.META_TABLE_NAME);
     Throwable t = null;
     LOG.info("Start");
     try {
@@ -161,6 +162,7 @@
     } finally {
       table.close();
     }
+    connection.close();
     LOG.info("Stop");
     assertTrue(t != null);
   }
@@ -182,7 +184,8 @@
     // and it has expired. Otherwise, if this functionality is broke, all retries will be run --
     // all ten of them -- and we'll get the RetriesExhaustedException exception.
     localConfig.setInt(HConstants.HBASE_CLIENT_META_OPERATION_TIMEOUT, pause - 1);
-    Table table = new HTable(localConfig, TableName.META_TABLE_NAME);
+    Connection connection = ConnectionFactory.createConnection(localConfig);
+    Table table = connection.getTable(TableName.META_TABLE_NAME);
     Throwable t = null;
     try {
       // An exists call turns into a get w/ a flag.
@@ -196,6 +199,7 @@
       fail();
     } finally {
       table.close();
+      connection.close();
     }
     assertTrue(t != null);
   }
@@ -216,7 +220,8 @@
     // Go against meta else we will try to find first region for the table on construction which
     // means we'll have to do a bunch more mocking. Tests that go against meta only should be
     // good for a bit of testing.
-    Table table = new HTable(this.conf, TableName.META_TABLE_NAME);
+    Connection connection = ConnectionFactory.createConnection(this.conf);
+    Table table = connection.getTable(TableName.META_TABLE_NAME);
     ResultScanner scanner = table.getScanner(HConstants.CATALOG_FAMILY);
     try {
       Result result = null;
@@ -226,6 +231,7 @@
     } finally {
       scanner.close();
       table.close();
+      connection.close();
     }
   }
@@ -236,7 +242,8 @@
     // Go against meta else we will try to find first region for the table on construction which
     // means we'll have to do a bunch more mocking. Tests that go against meta only should be
     // good for a bit of testing.
-    Table table = new HTable(this.conf, TableName.META_TABLE_NAME);
+    Connection connection = ConnectionFactory.createConnection(conf);
+    Table table = connection.getTable(TableName.META_TABLE_NAME);
     ResultScanner scanner = table.getScanner(HConstants.CATALOG_FAMILY);
     try {
       Result result = null;
@@ -246,6 +253,7 @@
     } finally {
       scanner.close();
       table.close();
+      connection.close();
     }
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/9246af8d/hbase-examples/src/test/java/org/apache/hadoop/hbase/coprocessor/example/TestBulkDeleteProtocol.java
----------------------------------------------------------------------
diff --git a/hbase-examples/src/test/java/org/apache/hadoop/hbase/coprocessor/example/TestBulkDeleteProtocol.java b/hbase-examples/src/test/java/org/apache/hadoop/hbase/coprocessor/example/TestBulkDeleteProtocol.java
index 87e655e..511bf46 100644
--- a/hbase-examples/src/test/java/org/apache/hadoop/hbase/coprocessor/example/TestBulkDeleteProtocol.java
+++ b/hbase-examples/src/test/java/org/apache/hadoop/hbase/coprocessor/example/TestBulkDeleteProtocol.java
@@ -125,7 +125,7 @@ public class TestBulkDeleteProtocol {
   private long invokeBulkDeleteProtocol(TableName tableName, final Scan scan,
       final int rowBatchSize, final DeleteType deleteType, final Long timeStamp) throws Throwable {
-    Table ht = new HTable(TEST_UTIL.getConfiguration(), tableName);
+    Table ht = TEST_UTIL.getConnection().getTable(tableName);
     long noOfDeletedRows = 0L;
     Batch.Call<BulkDeleteService, BulkDeleteResponse> callable =
         new Batch.Call<BulkDeleteService, BulkDeleteResponse>() {
@@ -220,7 +220,7 @@
     htd.addFamily(new HColumnDescriptor(FAMILY1));
     htd.addFamily(new HColumnDescriptor(FAMILY2));
     TEST_UTIL.getHBaseAdmin().createTable(htd, Bytes.toBytes(0), Bytes.toBytes(120), 5);
-    Table ht = new HTable(TEST_UTIL.getConfiguration(), tableName);
+    Table ht = TEST_UTIL.getConnection().getTable(tableName);
     List<Put> puts = new ArrayList<Put>(100);
     for (int j = 0; j < 100; j++) {
       Put put = new Put(Bytes.toBytes(j));
@@ -430,7 +430,7 @@
     hcd.setMaxVersions(10);// Just setting 10 as I am not testing with more than 10 versions here
     htd.addFamily(hcd);
     TEST_UTIL.getHBaseAdmin().createTable(htd, Bytes.toBytes(0), Bytes.toBytes(120), 5);
-    Table ht = new HTable(TEST_UTIL.getConfiguration(), tableName);
+    Table ht = TEST_UTIL.getConnection().getTable(tableName);
     return ht;
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/9246af8d/hbase-examples/src/test/java/org/apache/hadoop/hbase/coprocessor/example/TestRowCountEndpoint.java
----------------------------------------------------------------------
diff --git a/hbase-examples/src/test/java/org/apache/hadoop/hbase/coprocessor/example/TestRowCountEndpoint.java b/hbase-examples/src/test/java/org/apache/hadoop/hbase/coprocessor/example/TestRowCountEndpoint.java
index ddc5847..481cb91 100644
--- a/hbase-examples/src/test/java/org/apache/hadoop/hbase/coprocessor/example/TestRowCountEndpoint.java
+++ b/hbase-examples/src/test/java/org/apache/hadoop/hbase/coprocessor/example/TestRowCountEndpoint.java
@@ -71,7 +71,7 @@ public class TestRowCountEndpoint {
   // @Ignore @Test
   public void testEndpoint() throws Throwable {
-    Table table = new HTable(CONF, TEST_TABLE);
+    Table table = TEST_UTIL.getConnection().getTable(TEST_TABLE);
 
     // insert some test rows
     for (int i=0; i<5; i++) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/9246af8d/hbase-examples/src/test/java/org/apache/hadoop/hbase/coprocessor/example/TestZooKeeperScanPolicyObserver.java
----------------------------------------------------------------------
diff --git a/hbase-examples/src/test/java/org/apache/hadoop/hbase/coprocessor/example/TestZooKeeperScanPolicyObserver.java b/hbase-examples/src/test/java/org/apache/hadoop/hbase/coprocessor/example/TestZooKeeperScanPolicyObserver.java
index 7691586..db10c5a 100644
--- a/hbase-examples/src/test/java/org/apache/hadoop/hbase/coprocessor/example/TestZooKeeperScanPolicyObserver.java
+++ b/hbase-examples/src/test/java/org/apache/hadoop/hbase/coprocessor/example/TestZooKeeperScanPolicyObserver.java
@@ -76,7 +76,7 @@ public class TestZooKeeperScanPolicyObserver {
         .setTimeToLive(1);
     desc.addFamily(hcd);
     TEST_UTIL.getHBaseAdmin().createTable(desc);
-    Table t = new HTable(new Configuration(TEST_UTIL.getConfiguration()), tableName);
+    Table t = TEST_UTIL.getConnection().getTable(tableName);
     long now = EnvironmentEdgeManager.currentTime();
     ZooKeeperWatcher zkw = new ZooKeeperWatcher(TEST_UTIL.getConfiguration(), "test", null);

http://git-wip-us.apache.org/repos/asf/hbase/blob/9246af8d/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestLazyCfLoading.java
----------------------------------------------------------------------
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestLazyCfLoading.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestLazyCfLoading.java
index 9e00087..75ef959 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestLazyCfLoading.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestLazyCfLoading.java
@@ -27,6 +27,8 @@ import java.util.concurrent.atomic.AtomicLong;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
@@ -224,7 +226,8 @@
     long maxRuntime = conf.getLong(timeoutKey, DEFAULT_TIMEOUT_MINUTES);
     long serverCount = util.getHBaseClusterInterface().getClusterStatus().getServersSize();
     long keysToWrite = serverCount * KEYS_TO_WRITE_PER_SERVER;
-    Table table = new HTable(conf, TABLE_NAME);
+    Connection connection = ConnectionFactory.createConnection(conf);
+    Table table = connection.getTable(TABLE_NAME);
 
     // Create multi-threaded writer and start it. We write multiple columns/CFs and verify
     // their integrity, therefore multi-put is necessary.
@@ -288,5 +291,6 @@
     Assert.assertEquals("There are write failures", 0, writer.getNumWriteFailures());
     Assert.assertTrue("Writer is not done", isWriterDone);
     // Assert.fail("Boom!");
+    connection.close();
   }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/9246af8d/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RestartRsHoldingTableAction.java
----------------------------------------------------------------------
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RestartRsHoldingTableAction.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RestartRsHoldingTableAction.java
index b1ea8e5..7531f55 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RestartRsHoldingTableAction.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RestartRsHoldingTableAction.java
@@ -20,40 +20,35 @@ package org.apache.hadoop.hbase.chaos.actions;
 
 import java.io.IOException;
 import java.util.Collection;
+import java.util.List;
 
 import org.apache.commons.lang.math.RandomUtils;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.RegionLocator;
+import org.apache.hadoop.hbase.client.Table;
 
 /**
  * Action that restarts an HRegionServer holding one of the regions of the table.
  */
 public class RestartRsHoldingTableAction extends RestartActionBaseAction {
 
-  private final String tableName;
+  private final RegionLocator locator;
 
-  public RestartRsHoldingTableAction(long sleepTime, String tableName) {
+  public RestartRsHoldingTableAction(long sleepTime, RegionLocator locator) {
     super(sleepTime);
-    this.tableName = tableName;
+    this.locator = locator;
   }
 
   @Override
   public void perform() throws Exception {
-    HTable table = null;
-    try {
-      LOG.info("Performing action: Restart random RS holding table " + this.tableName);
-      Configuration conf = context.getHBaseIntegrationTestingUtility().getConfiguration();
-      table = new HTable(conf, TableName.valueOf(tableName));
-    } catch (IOException e) {
-      LOG.debug("Error creating HTable used to get list of region locations.", e);
-      return;
-    }
-
-    Collection<ServerName> serverNames = table.getRegionLocations().values();
-    ServerName[] nameArray = serverNames.toArray(new ServerName[serverNames.size()]);
-
-    restartRs(nameArray[RandomUtils.nextInt(nameArray.length)], sleepTime);
+    LOG.info("Performing action: Restart random RS holding table " + this.locator.getName());
+
+    List<HRegionLocation> locations = locator.getAllRegionLocations();
+    restartRs(locations.get(RandomUtils.nextInt(locations.size())).getServerName(), sleepTime);
   }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/9246af8d/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestImportTsv.java
----------------------------------------------------------------------
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestImportTsv.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestImportTsv.java
index 4b07f8f..3d72447 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestImportTsv.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestImportTsv.java
@@ -144,7 +144,7 @@ public class IntegrationTestImportTsv implements Configurable, Tool {
       setCaching(1000);
     }};
     try {
-      table = new HTable(getConf(), tableName);
+      table = util.getConnection().getTable(tableName);
       Iterator<Result> resultsIt = table.getScanner(scan).iterator();
       Iterator<KeyValue> expectedIt = simple_expected.iterator();
       while (resultsIt.hasNext() && expectedIt.hasNext()) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/9246af8d/hbase-it/src/test/java/org/apache/hadoop/hbase/mttr/IntegrationTestMTTR.java
----------------------------------------------------------------------
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/mttr/IntegrationTestMTTR.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/mttr/IntegrationTestMTTR.java
index 63430a1..feed860 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/mttr/IntegrationTestMTTR.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/mttr/IntegrationTestMTTR.java
@@ -189,7 +189,8 @@ public class IntegrationTestMTTR {
     // Set up the action that will restart a region server holding a region from our table
     // because this table should only have one region we should be good.
-    restartRSAction = new RestartRsHoldingTableAction(sleepTime, tableName.getNameAsString());
+    restartRSAction = new RestartRsHoldingTableAction(sleepTime,
+        util.getConnection().getRegionLocator(tableName));
 
     // Set up the action that will kill the region holding meta.
     restartMetaAction = new RestartRsHoldingMetaAction(sleepTime);
@@ -478,7 +479,7 @@
 
     public PutCallable(Future<?> f) throws IOException {
       super(f);
-      this.table = new HTable(util.getConfiguration(), tableName);
+      this.table = util.getConnection().getTable(tableName);
     }
 
     @Override
@@ -504,7 +505,7 @@
     public ScanCallable(Future<?> f) throws IOException {
       super(f);
-      this.table = new HTable(util.getConfiguration(), tableName);
+      this.table = util.getConnection().getTable(tableName);
     }
 
     @Override
@@ -545,7 +546,7 @@
     protected boolean doAction() throws Exception {
       Admin admin = null;
       try {
-        admin = new HBaseAdmin(util.getConfiguration());
+        admin = util.getHBaseAdmin();
         ClusterStatus status = admin.getClusterStatus();
         return status != null;
       } finally {

http://git-wip-us.apache.org/repos/asf/hbase/blob/9246af8d/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java
----------------------------------------------------------------------
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java
index c7dae78..931fba4 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java
@@ -18,18 +18,7 @@
 package org.apache.hadoop.hbase.test;
 
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Random;
-import java.util.Set;
-import java.util.UUID;
-import java.util.concurrent.atomic.AtomicInteger;
-
+import com.google.common.collect.Sets;
 import org.apache.commons.cli.CommandLine;
 import org.apache.commons.cli.GnuParser;
 import org.apache.commons.cli.HelpFormatter;
@@ -48,26 +37,27 @@ import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.IntegrationTestBase;
 import org.apache.hadoop.hbase.IntegrationTestingUtility;
-import org.apache.hadoop.hbase.fs.HFileSystem;
-import org.apache.hadoop.hbase.testclassification.IntegrationTests;
 import org.apache.hadoop.hbase.MasterNotRunningException;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HConnection;
 import org.apache.hadoop.hbase.client.HConnectionManager;
-import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.ScannerCallable;
 import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.fs.HFileSystem;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
 import org.apache.hadoop.hbase.mapreduce.TableMapper;
 import org.apache.hadoop.hbase.mapreduce.TableRecordReaderImpl;
+import org.apache.hadoop.hbase.testclassification.IntegrationTests;
 import org.apache.hadoop.hbase.util.AbstractHBaseTool;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.RegionSplitter;
@@ -97,7 +87,17 @@ import org.apache.hadoop.util.ToolRunner;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
-import com.google.common.collect.Sets;
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Random;
+import java.util.Set;
+import java.util.UUID;
+import java.util.concurrent.atomic.AtomicInteger;
 
 /**
  * This is an integration test borrowed from goraci, written by Keith Turner,
@@ -340,7 +340,8 @@
     byte[] id;
     long count = 0;
     int i;
-    HTable table;
+    Table table;
+    Connection connection;
     long numNodes;
     long wrap;
     int width;
@@ -348,8 +349,8 @@
     @Override
     protected void setup(Context context) throws IOException, InterruptedException {
       id = Bytes.toBytes("Job: "+context.getJobID() + " Task: " + context.getTaskAttemptID());
-      Configuration conf = context.getConfiguration();
-      instantiateHTable(conf);
+      this.connection = ConnectionFactory.createConnection(context.getConfiguration());
+      instantiateHTable();
       this.width = context.getConfiguration().getInt(GENERATOR_WIDTH_KEY, WIDTH_DEFAULT);
       current = new byte[this.width][];
       int wrapMultiplier = context.getConfiguration().getInt(GENERATOR_WRAP_KEY, WRAP_DEFAULT);
@@ -361,8 +362,8 @@
       }
     }
 
-    protected void instantiateHTable(Configuration conf) throws IOException {
-      table = new HTable(conf, getTableName(conf));
+    protected void instantiateHTable() throws IOException {
+      table = connection.getTable(getTableName(connection.getConfiguration()));
       table.setAutoFlushTo(false);
       table.setWriteBufferSize(4 * 1024 * 1024);
     }
@@ -370,6 +371,7 @@
     @Override
     protected void cleanup(Context context) throws IOException ,InterruptedException {
       table.close();
+      connection.close();
     }
 
     @Override
@@ -876,7 +878,8 @@
       System.exit(-1);
     }
 
-    Table table = new HTable(getConf(), getTableName(getConf()));
+    Connection connection = ConnectionFactory.createConnection(getConf());
+    Table table = connection.getTable(getTableName(getConf()));
 
     Scan scan = new Scan();
     scan.setBatch(10000);
@@ -906,6 +909,7 @@
     }
     scanner.close();
     table.close();
+    connection.close();
 
     return 0;
   }
@@ -926,9 +930,10 @@
       org.apache.hadoop.hbase.client.Delete delete
          = new org.apache.hadoop.hbase.client.Delete(val);
 
-      Table table = new HTable(getConf(), getTableName(getConf()));
-      table.delete(delete);
-      table.close();
+      try (Connection connection = ConnectionFactory.createConnection(getConf());
+          Table table = connection.getTable(getTableName(getConf()))) {
+        table.delete(delete);
+      }
 
       System.out.println("Delete successful");
       return 0;
@@ -970,7 +975,8 @@
       byte[] startKey = isSpecificStart ? Bytes.toBytesBinary(cmd.getOptionValue('s')) : null;
       int logEvery = cmd.hasOption('l') ? Integer.parseInt(cmd.getOptionValue('l')) : 1;
 
-      Table table = new HTable(getConf(), getTableName(getConf()));
+      Connection connection = ConnectionFactory.createConnection(getConf());
+      Table table = connection.getTable(getTableName(getConf()));
       long numQueries = 0;
       // If isSpecificStart is set, only walk one list from that particular node.
       // Note that in case of circular (or P-shaped) list it will walk forever, as is
@@ -1003,6 +1009,7 @@
       }
 
       table.close();
+      connection.close();
 
      return 0;
    }

http://git-wip-us.apache.org/repos/asf/hbase/blob/9246af8d/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedListWithVisibility.java
----------------------------------------------------------------------
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedListWithVisibility.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedListWithVisibility.java
index dc517a5..50c638a 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedListWithVisibility.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedListWithVisibility.java
@@ -38,6 +38,8 @@ import org.apache.hadoop.hbase.IntegrationTestingUtility;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.chaos.factories.MonkeyFactory;
 import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HConnection;
@@ -182,9 +184,9 @@
     }
 
     @Override
-    protected void instantiateHTable(Configuration conf) throws IOException {
+    protected void instantiateHTable() throws IOException {
       for (int i = 0; i < DEFAULT_TABLES_COUNT; i++) {
-        HTable table = new HTable(conf, getTableName(i));
+        Table table = connection.getTable(getTableName(i));
         table.setAutoFlushTo(true);
         //table.setWriteBufferSize(4 * 1024 * 1024);
         this.tables[i] = table;

http://git-wip-us.apache.org/repos/asf/hbase/blob/9246af8d/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestLoadAndVerify.java
----------------------------------------------------------------------
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestLoadAndVerify.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestLoadAndVerify.java
index 60f20a5..6e10ba9 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestLoadAndVerify.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestLoadAndVerify.java
@@ -40,6 +40,9 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.IntegrationTestBase;
 import org.apache.hadoop.hbase.IntegrationTestingUtility;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.testclassification.IntegrationTests;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
@@ -164,13 +167,14 @@ public void cleanUpCluster() throws Exception {
     extends Mapper<NullWritable, NullWritable, NullWritable, NullWritable> {
 
     protected long recordsToWrite;
-    protected HTable table;
+    protected Connection connection;
+    protected Table table;
     protected Configuration conf;
     protected int numBackReferencesPerRow;
+
     protected String shortTaskId;
 
     protected Random rand = new Random();
-
     protected Counter rowsWritten, refsWritten;
 
     @Override
@@ -179,7 +183,8 @@ public void cleanUpCluster() throws Exception {
       recordsToWrite = conf.getLong(NUM_TO_WRITE_KEY, NUM_TO_WRITE_DEFAULT);
       String tableName = conf.get(TABLE_NAME_KEY, TABLE_NAME_DEFAULT);
       numBackReferencesPerRow = conf.getInt(NUM_BACKREFS_KEY, NUM_BACKREFS_DEFAULT);
-      table = new HTable(conf, TableName.valueOf(tableName));
+      this.connection = ConnectionFactory.createConnection(conf);
+      table = connection.getTable(TableName.valueOf(tableName));
       table.setWriteBufferSize(4*1024*1024);
       table.setAutoFlushTo(false);
@@ -198,6 +203,7 @@ public void cleanUpCluster() throws Exception {
     public void cleanup(Context context) throws IOException {
       table.flushCommits();
       table.close();
+      connection.close();
     }
 
     @Override

http://git-wip-us.apache.org/repos/asf/hbase/blob/9246af8d/hbase-it/src/test/java/org/apache/hadoop/hbase/trace/IntegrationTestSendTraceRequests.java
----------------------------------------------------------------------
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/trace/IntegrationTestSendTraceRequests.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/trace/IntegrationTestSendTraceRequests.java
index b1cf57e..1f313c3 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/trace/IntegrationTestSendTraceRequests.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/trace/IntegrationTestSendTraceRequests.java
@@ -124,7 +124,7 @@ public class IntegrationTestSendTraceRequests extends AbstractHBaseTool {
       ResultScanner rs = null;
       try {
         innerScope = Trace.startSpan("Scan", Sampler.ALWAYS);
-        Table ht = new HTable(util.getConfiguration(), tableName);
+        Table ht = util.getConnection().getTable(tableName);
         Scan s = new Scan();
         s.setStartRow(Bytes.toBytes(rowKeyQueue.take()));
         s.setBatch(7);
@@ -174,7 +174,7 @@
 
         Table ht = null;
         try {
-          ht = new HTable(util.getConfiguration(), tableName);
+          ht = util.getConnection().getTable(tableName);
         } catch (IOException e) {
           e.printStackTrace();
         }
@@ -234,7 +234,7 @@
 
   private LinkedBlockingQueue<Long> insertData() throws IOException, InterruptedException {
     LinkedBlockingQueue<Long> rowKeys = new LinkedBlockingQueue<Long>(25000);
-    HTable ht = new HTable(util.getConfiguration(), this.tableName);
+    Table ht = util.getConnection().getTable(this.tableName);
     byte[] value = new byte[300];
     for (int x = 0; x < 5000; x++) {
       TraceScope traceScope = Trace.startSpan("insertData", Sampler.ALWAYS);

http://git-wip-us.apache.org/repos/asf/hbase/blob/9246af8d/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestGzipFilter.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestGzipFilter.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestGzipFilter.java
index 66483d7..e4a322a 100644
--- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestGzipFilter.java
+++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestGzipFilter.java
@@ -101,7 +101,7 @@ public class TestGzipFilter {
     Response response = client.put(path, headers, value_1_gzip);
     assertEquals(response.getCode(), 200);
 
-    Table table = new HTable(TEST_UTIL.getConfiguration(), TABLE);
+    Table table = TEST_UTIL.getConnection().getTable(TABLE);
     Get get = new Get(Bytes.toBytes(ROW_1));
     get.addColumn(Bytes.toBytes(CFA), Bytes.toBytes("1"));
     Result result = table.get(get);

http://git-wip-us.apache.org/repos/asf/hbase/blob/9246af8d/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestScannersWithFilters.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestScannersWithFilters.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestScannersWithFilters.java
index 7f0b1f5..5bd8fc8 100644
--- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestScannersWithFilters.java
+++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestScannersWithFilters.java
@@ -137,7 +137,7 @@ public class TestScannersWithFilters {
       htd.addFamily(new HColumnDescriptor(FAMILIES[0]));
       htd.addFamily(new HColumnDescriptor(FAMILIES[1]));
       admin.createTable(htd);
-      Table table = new HTable(TEST_UTIL.getConfiguration(), TABLE);
+      Table table = TEST_UTIL.getConnection().getTable(TABLE);
       // Insert first half
       for(byte [] ROW : ROWS_ONE) {
         Put p = new Put(ROW);

http://git-wip-us.apache.org/repos/asf/hbase/blob/9246af8d/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestScannersWithLabels.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestScannersWithLabels.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestScannersWithLabels.java
index 5745954..41c036d 100644
--- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestScannersWithLabels.java
+++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestScannersWithLabels.java
@@ -17,23 +17,6 @@
  */
 package org.apache.hadoop.hbase.rest;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-
-import java.io.ByteArrayInputStream;
-import java.io.IOException;
-import java.io.StringWriter;
-import java.security.PrivilegedExceptionAction;
-import java.util.ArrayList;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Random;
-
-import javax.xml.bind.JAXBContext;
-import javax.xml.bind.JAXBException;
-import javax.xml.bind.Marshaller;
-import javax.xml.bind.Unmarshaller;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
@@ -42,7 +25,6 @@ import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Durability;
-import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.protobuf.generated.VisibilityLabelsProtos.VisibilityLabelsResponse;
@@ -69,6 +51,21 @@ import org.junit.BeforeClass;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
+import javax.xml.bind.JAXBContext;
+import javax.xml.bind.JAXBException;
+import javax.xml.bind.Marshaller;
+import javax.xml.bind.Unmarshaller;
+import java.io.ByteArrayInputStream;
+import java.io.IOException;
+import java.io.StringWriter;
+import java.security.PrivilegedExceptionAction;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+
 @Category({RestTests.class, MediumTests.class})
 public class TestScannersWithLabels {
   private static final TableName TABLE = TableName.valueOf("TestScannersWithLabels");
@@ -104,7 +101,7 @@
           + TOPSECRET));
       puts.add(put);
     }
-    try (Table table = new HTable(TEST_UTIL.getConfiguration(), tableName)) {
+    try (Table table = TEST_UTIL.getConnection().getTable(tableName)) {
       table.put(puts);
     }
     return puts.size();

http://git-wip-us.apache.org/repos/asf/hbase/blob/9246af8d/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestTableResource.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestTableResource.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestTableResource.java
index 77e89cd..7cbb2a2 100644
--- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestTableResource.java
+++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestTableResource.java
@@ -87,7 +87,7 @@ public class TestTableResource {
     HTableDescriptor htd = new HTableDescriptor(TABLE);
     htd.addFamily(new HColumnDescriptor(COLUMN_FAMILY));
     admin.createTable(htd);
-    HTable table = new HTable(TEST_UTIL.getConfiguration(), TABLE);
+    HTable table = (HTable) TEST_UTIL.getConnection().getTable(TABLE);
     byte[] k = new byte[3];
     byte [][] famAndQf = KeyValue.parseColumn(Bytes.toBytes(COLUMN));
     for (byte b1 = 'a'; b1 < 'z'; b1++) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/9246af8d/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteTable.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteTable.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteTable.java
index 9516995..eb1fc98 100644
--- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteTable.java
+++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteTable.java
@@ -101,7 +101,7 @@ public class TestRemoteTable {
     admin.createTable(htd);
     Table table = null;
     try {
-      table = new HTable(TEST_UTIL.getConfiguration(), TABLE);
+      table = TEST_UTIL.getConnection().getTable(TABLE);
       Put put = new Put(ROW_1);
       put.add(COLUMN_1, QUALIFIER_1, TS_2, VALUE_1);
       table.put(put);
@@ -135,7 +135,7 @@
   public void testGetTableDescriptor() throws IOException {
     Table table = null;
     try {
-      table = new HTable(TEST_UTIL.getConfiguration(), TABLE);
+      table = TEST_UTIL.getConnection().getTable(TABLE);
       HTableDescriptor local = table.getTableDescriptor();
       assertEquals(remoteTable.getTableDescriptor(), local);
     } finally {

http://git-wip-us.apache.org/repos/asf/hbase/blob/9246af8d/hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java
index f1fd7d2..eacba6f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java
@@ -30,7 +30,8 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread;
@@ -461,7 +462,8 @@ public class LocalHBaseCluster {
     Configuration conf = HBaseConfiguration.create();
     LocalHBaseCluster cluster = new LocalHBaseCluster(conf);
     cluster.startup();
-    Admin admin = new HBaseAdmin(conf);
+    Connection connection = ConnectionFactory.createConnection(conf);
+    Admin admin = connection.getAdmin();
     try {
       HTableDescriptor htd =
         new HTableDescriptor(TableName.valueOf(cluster.getClass().getName()));
@@ -469,6 +471,7 @@
     } finally {
       admin.close();
     }
+    connection.close();
     cluster.shutdown();
   }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hbase/blob/9246af8d/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java
index 312a3ae..eeb941a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java
@@ -48,7 +48,6 @@ import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.HTableInterface;
 import org.apache.hadoop.hbase.client.HTableWrapper;
-import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.CoprocessorClassLoader;
 import org.apache.hadoop.hbase.util.SortedCopyOnWriteSet;
 import org.apache.hadoop.hbase.util.VersionInfo;
@@ -325,7 +324,7 @@ public abstract class CoprocessorHost<E extends CoprocessorEnvironment> {
     final ClassLoader systemClassLoader = this.getClass().getClassLoader();
     for (E env : coprocessors) {
       ClassLoader cl = env.getInstance().getClass().getClassLoader();
-      if (cl != systemClassLoader ){
+      if (cl != systemClassLoader){
!= systemClassLoader){ //do not include system classloader externalClassLoaders.add(cl); } @@ -434,7 +433,7 @@ public abstract class CoprocessorHost<E extends CoprocessorEnvironment> { } catch (IOException e) { // nothing can be done here LOG.warn("Failed to close " + - Bytes.toStringBinary(table.getTableName()), e); + table.getName(), e); } } } http://git-wip-us.apache.org/repos/asf/hbase/blob/9246af8d/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableInputFormat.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableInputFormat.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableInputFormat.java index e1220fb..368510f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableInputFormat.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableInputFormat.java @@ -25,8 +25,9 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.mapred.FileInputFormat; @@ -58,8 +59,8 @@ public class TableInputFormat extends TableInputFormatBase implements } setInputColumns(m_cols); try { - setHTable( - new HTable(HBaseConfiguration.create(job), TableName.valueOf(tableNames[0].getName()))); + Connection connection = ConnectionFactory.createConnection(job); + setHTable((HTable) connection.getTable(TableName.valueOf(tableNames[0].getName()))); } catch (Exception e) { LOG.error(StringUtils.stringifyException(e)); } http://git-wip-us.apache.org/repos/asf/hbase/blob/9246af8d/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableInputFormatBase.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableInputFormatBase.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableInputFormatBase.java index fbfd984..2a50efc 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableInputFormatBase.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableInputFormatBase.java @@ -27,6 +27,7 @@ import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.filter.Filter; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.mapred.InputFormat; @@ -158,7 +159,7 @@ implements InputFormat<ImmutableBytesWritable, Result> { /** * Allows subclasses to get the {@link HTable}. 
*/ - protected HTable getHTable() { + protected Table getHTable() { return this.table; } http://git-wip-us.apache.org/repos/asf/hbase/blob/9246af8d/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/CopyTable.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/CopyTable.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/CopyTable.java index e88d6df..7584bc2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/CopyTable.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/CopyTable.java @@ -34,10 +34,10 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Scan; -import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.mapreduce.Job; import org.apache.hadoop.util.GenericOptionsParser; @@ -156,8 +156,9 @@ public class CopyTable extends Configured implements Tool { System.out.println("HFiles will be stored at " + this.bulkloadDir); HFileOutputFormat2.setOutputPath(job, bulkloadDir); try (Connection conn = ConnectionFactory.createConnection(getConf()); - Table htable = conn.getTable(TableName.valueOf(dstTableName))) { - HFileOutputFormat2.configureIncrementalLoadMap(job, htable); + Admin admin = conn.getAdmin()) { + HFileOutputFormat2.configureIncrementalLoadMap(job, + admin.getTableDescriptor((TableName.valueOf(dstTableName)))); } } else { TableMapReduceUtil.initTableMapperJob(tableName, scan, @@ -192,7 +193,8 @@ public class CopyTable extends Configured implements Tool { System.err.println(" versions number of cell versions to copy"); System.err.println(" new.name new table's name"); System.err.println(" peer.adr Address of the peer cluster given in the format"); - System.err.println(" hbase.zookeeer.quorum:hbase.zookeeper.client.port:zookeeper.znode.parent"); + System.err.println(" hbase.zookeeper.quorum:hbase.zookeeper.client" + + ".port:zookeeper.znode.parent"); System.err.println(" families comma-separated list of families to copy"); System.err.println(" To copy from cf1 to cf2, give sourceCfName:destCfName. 
"); System.err.println(" To keep the same name, just give \"cfName\""); @@ -298,7 +300,7 @@ public class CopyTable extends Configured implements Tool { if (i == args.length-1) { tableName = cmd; } else { - printUsage("Invalid argument '" + cmd + "'" ); + printUsage("Invalid argument '" + cmd + "'"); return false; } } http://git-wip-us.apache.org/repos/asf/hbase/blob/9246af8d/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java index 9ccaf95..f69f21f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java @@ -449,7 +449,8 @@ public class HFileOutputFormat2 LOG.info("Incremental table " + regionLocator.getName() + " output configured."); } - public static void configureIncrementalLoadMap(Job job, Table table) throws IOException { + public static void configureIncrementalLoadMap(Job job, HTableDescriptor tableDescriptor) throws + IOException { Configuration conf = job.getConfiguration(); job.setOutputKeyClass(ImmutableBytesWritable.class); @@ -457,15 +458,14 @@ public class HFileOutputFormat2 job.setOutputFormatClass(HFileOutputFormat2.class); // Set compression algorithms based on column families - configureCompression(conf, table.getTableDescriptor()); - configureBloomType(table.getTableDescriptor(), conf); - configureBlockSize(table.getTableDescriptor(), conf); - HTableDescriptor tableDescriptor = table.getTableDescriptor(); + configureCompression(conf, tableDescriptor); + configureBloomType(tableDescriptor, conf); + configureBlockSize(tableDescriptor, conf); configureDataBlockEncoding(tableDescriptor, conf); TableMapReduceUtil.addDependencyJars(job); TableMapReduceUtil.initCredentials(job); - LOG.info("Incremental table " + table.getName() + " output configured."); + LOG.info("Incremental table " + tableDescriptor.getTableName() + " output configured."); } /** @@ -483,8 +483,7 @@ public class HFileOutputFormat2 Map<byte[], Algorithm> compressionMap = new TreeMap<byte[], Algorithm>(Bytes.BYTES_COMPARATOR); for (Map.Entry<byte[], String> e : stringMap.entrySet()) { - Algorithm algorithm = AbstractHFileWriter.compressionByName - (e.getValue()); + Algorithm algorithm = AbstractHFileWriter.compressionByName(e.getValue()); compressionMap.put(e.getKey(), algorithm); } return compressionMap; @@ -602,7 +601,7 @@ public class HFileOutputFormat2 * Serialize column family to compression algorithm map to configuration. * Invoked while configuring the MR job for incremental load. * - * @param table to read the properties from + * @param tableDescriptor to read the properties from * @param conf to persist serialized values into * @throws IOException * on failure to read column family descriptors @@ -705,7 +704,7 @@ public class HFileOutputFormat2 * Serialize column family to data block encoding map to configuration. * Invoked while configuring the MR job for incremental load. 
http://git-wip-us.apache.org/repos/asf/hbase/blob/9246af8d/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java
index bd44518..9c5b5af 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java
@@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.ZooKeeperConnectionException;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Delete;
@@ -512,18 +513,23 @@ public class Import extends Configured implements Tool {
   public static void flushRegionsIfNecessary(Configuration conf) throws IOException,
       InterruptedException {
     String tableName = conf.get(TABLE_NAME);
-    HBaseAdmin hAdmin = null;
+    Admin hAdmin = null;
+    Connection connection = null;
     String durability = conf.get(WAL_DURABILITY);
     // Need to flush if the data is written to hbase and skip wal is enabled.
     if (conf.get(BULK_OUTPUT_CONF_KEY) == null && durability != null
         && Durability.SKIP_WAL.name().equalsIgnoreCase(durability)) {
       try {
-        hAdmin = new HBaseAdmin(conf);
-        hAdmin.flush(tableName);
+        connection = ConnectionFactory.createConnection(conf);
+        hAdmin = connection.getAdmin();
+        hAdmin.flush(TableName.valueOf(tableName));
       } finally {
         if (hAdmin != null) {
           hAdmin.close();
         }
+        if (connection != null) {
+          connection.close();
+        }
       }
     }
   }
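The Import change pairs a Connection with an Admin and flushes by TableName. The same logic in compact try-with-resources form, equivalent to the explicit finally block above ("someTable" is a placeholder):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushSketch {
  static void flushTable(Configuration conf) throws Exception {
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Admin.flush takes a TableName now, not a raw String
      admin.flush(TableName.valueOf("someTable"));
    }
  }
}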
http://git-wip-us.apache.org/repos/asf/hbase/blob/9246af8d/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/ImportTsv.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/ImportTsv.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/ImportTsv.java
index 90f2f0e..d4394eb 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/ImportTsv.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/ImportTsv.java
@@ -477,7 +477,7 @@ public class ImportTsv extends Configured implements Tool {
     job.setInputFormatClass(TextInputFormat.class);
     job.setMapperClass(mapperClass);
     String hfileOutPath = conf.get(BULK_OUTPUT_CONF_KEY);
-    String columns[] = conf.getStrings(COLUMNS_CONF_KEY);
+    String[] columns = conf.getStrings(COLUMNS_CONF_KEY);
     if(StringUtils.isNotEmpty(conf.get(CREDENTIALS_LOCATION))) {
       String fileLoc = conf.get(CREDENTIALS_LOCATION);
       Credentials cred = Credentials.readTokenStorageFile(new File(fileLoc), conf);
@@ -662,7 +662,7 @@ public class ImportTsv extends Configured implements Tool {
     // TODO: validation for TsvImporterMapper, not this tool. Move elsewhere.
     if (null == getConf().get(MAPPER_CONF_KEY)) {
       // Make sure columns are specified
-      String columns[] = getConf().getStrings(COLUMNS_CONF_KEY);
+      String[] columns = getConf().getStrings(COLUMNS_CONF_KEY);
       if (columns == null) {
         usage("No columns specified. Please specify with -D" +
             COLUMNS_CONF_KEY+"=...");

http://git-wip-us.apache.org/repos/asf/hbase/blob/9246af8d/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java
index 51a5458..7866c10 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java
@@ -47,6 +47,7 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HConnection;
 import org.apache.hadoop.hbase.client.HTable;
@@ -932,7 +933,8 @@ public class LoadIncrementalHFiles extends Configured implements Tool {
     }
 
     Path hfofDir = new Path(dirPath);
-    HTable table = new HTable(getConf(), tableName);
+    Connection connection = ConnectionFactory.createConnection(getConf());
+    HTable table = (HTable) connection.getTable(tableName);
 
     doBulkLoad(hfofDir, table);
     return 0;
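Note that the LoadIncrementalHFiles hunk does not show the new Connection being closed (the deprecated constructor it replaces leaked a connection in much the same way). If the tool wanted to scope it, the lookup could look like this sketch, with the doBulkLoad call elided:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.HTable;

public class BulkLoadScopeSketch {
  static void run(Configuration conf, Path hfofDir, TableName tableName) throws Exception {
    try (Connection connection = ConnectionFactory.createConnection(conf);
         HTable table = (HTable) connection.getTable(tableName)) {
      // doBulkLoad(hfofDir, table);
    } // table and connection released once the load is done
  }
}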
http://git-wip-us.apache.org/repos/asf/hbase/blob/9246af8d/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableOutputFormat.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableOutputFormat.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableOutputFormat.java
index 20cf50a..0c8e76f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableOutputFormat.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableOutputFormat.java
@@ -29,11 +29,13 @@ import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Delete;
-import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Durability;
+import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.mapreduce.JobContext;
@@ -73,7 +75,8 @@ public class MultiTableOutputFormat extends OutputFormat<ImmutableBytesWritable,
   protected static class MultiTableRecordWriter extends
       RecordWriter<ImmutableBytesWritable, Mutation> {
     private static final Log LOG = LogFactory.getLog(MultiTableRecordWriter.class);
-    Map<ImmutableBytesWritable, HTable> tables;
+    Connection connection;
+    Map<ImmutableBytesWritable, Table> tables;
     Configuration conf;
     boolean useWriteAheadLogging;
@@ -85,10 +88,10 @@ public class MultiTableOutputFormat extends OutputFormat<ImmutableBytesWritable,
      *          <tt>false</tt>) to improve performance when bulk loading data.
      */
     public MultiTableRecordWriter(Configuration conf,
-        boolean useWriteAheadLogging) {
+        boolean useWriteAheadLogging) throws IOException {
       LOG.debug("Created new MultiTableRecordReader with WAL "
           + (useWriteAheadLogging ? "on" : "off"));
-      this.tables = new HashMap<ImmutableBytesWritable, HTable>();
+      this.tables = new HashMap<ImmutableBytesWritable, Table>();
       this.conf = conf;
       this.useWriteAheadLogging = useWriteAheadLogging;
     }
@@ -100,10 +103,14 @@ public class MultiTableOutputFormat extends OutputFormat<ImmutableBytesWritable,
      * @throws IOException
      *           if there is a problem opening a table
      */
-    HTable getTable(ImmutableBytesWritable tableName) throws IOException {
+    Table getTable(ImmutableBytesWritable tableName) throws IOException {
+      if(this.connection == null){
+        this.connection = ConnectionFactory.createConnection(conf);
+      }
       if (!tables.containsKey(tableName)) {
         LOG.debug("Opening HTable \"" + Bytes.toString(tableName.get())+ "\" for writing");
-        HTable table = new HTable(conf, TableName.valueOf(tableName.get()));
+
+        Table table = connection.getTable(TableName.valueOf(tableName.get()));
         table.setAutoFlushTo(false);
         tables.put(tableName, table);
       }
@@ -112,9 +119,12 @@ public class MultiTableOutputFormat extends OutputFormat<ImmutableBytesWritable,
     @Override
     public void close(TaskAttemptContext context) throws IOException {
-      for (HTable table : tables.values()) {
+      for (Table table : tables.values()) {
         table.flushCommits();
       }
+      if(connection != null){
+        connection.close();
+      }
     }
 
     /**
@@ -129,7 +139,7 @@ public class MultiTableOutputFormat extends OutputFormat<ImmutableBytesWritable,
      */
     @Override
     public void write(ImmutableBytesWritable tableName, Mutation action) throws IOException {
-      HTable table = getTable(tableName);
+      Table table = getTable(tableName);
       // The actions are not immutable, so we defensively copy them
       if (action instanceof Put) {
         Put put = new Put((Put) action);
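MultiTableRecordWriter now shares one lazily created Connection behind all of its cached Tables and releases it in close(). The same structure as a self-contained sketch; it keys the cache by TableName for simplicity and uses Table.close() where the writer above calls flushCommits(), since closing a buffered table flushes it first:

import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Table;

public class LazyTableCache {
  private final Configuration conf;
  private Connection connection; // created on first use, as in getTable above
  private final Map<TableName, Table> tables = new HashMap<TableName, Table>();

  public LazyTableCache(Configuration conf) {
    this.conf = conf;
  }

  // One shared Connection, one cached Table per table name.
  public Table getTable(TableName name) throws IOException {
    if (connection == null) {
      connection = ConnectionFactory.createConnection(conf);
    }
    Table table = tables.get(name);
    if (table == null) {
      table = connection.getTable(name);
      tables.put(name, table);
    }
    return table;
  }

  // Release every Table, then the Connection that owns them.
  public void close() throws IOException {
    for (Table table : tables.values()) {
      table.close();
    }
    tables.clear();
    if (connection != null) {
      connection.close();
    }
  }
}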
http://git-wip-us.apache.org/repos/asf/hbase/blob/9246af8d/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java
index 06ab5c4..67d9b0d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java
@@ -115,7 +115,7 @@ public class WALPlayer extends Configured implements Tool {
     @Override
     public void setup(Context context) throws IOException {
       // only a single table is supported when HFiles are generated with HFileOutputFormat
-      String tables[] = context.getConfiguration().getStrings(TABLES_KEY);
+      String[] tables = context.getConfiguration().getStrings(TABLES_KEY);
       if (tables == null || tables.length != 1) {
         // this can only happen when WALMapper is used directly by a class other than WALPlayer
         throw new IOException("Exactly one table must be specified for bulk HFile case.");

http://git-wip-us.apache.org/repos/asf/hbase/blob/9246af8d/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
index c091312..b6d43de 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
@@ -24,11 +24,15 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
-import org.apache.hadoop.hbase.*;
+import org.apache.hadoop.hbase.Abortable;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.HConnectable;
 import org.apache.hadoop.hbase.client.HConnection;
 import org.apache.hadoop.hbase.client.HConnectionManager;
-import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
@@ -84,9 +88,13 @@ public class VerifyReplication extends Configured implements Tool {
   public static class Verifier
       extends TableMapper<ImmutableBytesWritable, Put> {
+
+
     public static enum Counters {
       GOODROWS, BADROWS, ONLY_IN_SOURCE_TABLE_ROWS, ONLY_IN_PEER_TABLE_ROWS, CONTENT_DIFFERENT_ROWS}
+    private Connection connection;
+    private Table replicatedTable;
     private ResultScanner replicatedScanner;
     private Result currentCompareRowInPeerTable;
@@ -129,8 +137,8 @@ public class VerifyReplication extends Configured implements Tool {
             ZKUtil.applyClusterKeyToConf(peerConf, zkClusterKey);
 
             TableName tableName = TableName.valueOf(conf.get(NAME + ".tableName"));
-            // TODO: THis HTable doesn't get closed.  Fix!
-            Table replicatedTable = new HTable(peerConf, tableName);
+            connection = ConnectionFactory.createConnection(peerConf);
+            replicatedTable = connection.getTable(tableName);
             scan.setStartRow(value.getRow());
             scan.setStopRow(tableSplit.getEndRow());
             replicatedScanner = replicatedTable.getScanner(scan);
@@ -191,6 +199,20 @@ public class VerifyReplication extends Configured implements Tool {
         replicatedScanner = null;
       }
     }
+    if(replicatedTable != null){
+      try{
+        replicatedTable.close();
+      } catch (Exception e) {
+        LOG.error("fail to close table in cleanup", e);
+      }
+    }
+    if(connection != null){
+      try {
+        connection.close();
+      } catch (Exception e) {
+        LOG.error("fail to close connection in cleanup", e);
+      }
+    }
   }
 }
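The cleanup added to VerifyReplication closes resources innermost first (scanner, then table, then connection) and isolates each close so one failure cannot leak the rest. That shape, extracted as a sketch:

import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Table;

public class CleanupSketch {
  static void closeQuietly(ResultScanner scanner, Table table, Connection connection) {
    if (scanner != null) {
      scanner.close(); // ResultScanner.close() declares no checked exception
    }
    if (table != null) {
      try {
        table.close();
      } catch (Exception e) {
        // log and continue so the connection below still gets closed
      }
    }
    if (connection != null) {
      try {
        connection.close();
      } catch (Exception e) {
        // log and give up
      }
    }
  }
}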
http://git-wip-us.apache.org/repos/asf/hbase/blob/9246af8d/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HMerge.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HMerge.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HMerge.java
index c577abf..78c7a06 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HMerge.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HMerge.java
@@ -37,12 +37,13 @@ import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotDisabledException;
 import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HConnectable;
 import org.apache.hadoop.hbase.client.HConnection;
 import org.apache.hadoop.hbase.client.HConnectionManager;
-import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Table;
@@ -245,7 +246,8 @@ class HMerge {
         throws IOException {
       super(conf, fs, tableName);
       this.tableName = tableName;
-      this.table = new HTable(conf, TableName.META_TABLE_NAME);
+      Connection connection = ConnectionFactory.createConnection(conf);
+      this.table = connection.getTable(TableName.META_TABLE_NAME);
       this.metaScanner = table.getScanner(HConstants.CATALOG_FAMILY,
           HConstants.REGIONINFO_QUALIFIER);
       this.latestRegion = null;
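HMerge now reads hbase:meta through a Connection as well. The same scan in a fully scoped sketch (HMerge itself keeps the table and scanner open as fields across merge steps):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Table;

public class MetaScanSketch {
  public static void scanMeta(Configuration conf) throws Exception {
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table meta = connection.getTable(TableName.META_TABLE_NAME);
         ResultScanner scanner = meta.getScanner(HConstants.CATALOG_FAMILY,
             HConstants.REGIONINFO_QUALIFIER)) {
      for (Result row : scanner) {
        // each row describes one region; HMerge reads the HRegionInfo from it
      }
    }
  }
}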
import="org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState" import="org.apache.hadoop.hbase.TableName" - import="org.apache.hadoop.hbase.client.RegionReplicaUtil" import="org.apache.hadoop.hbase.HBaseConfiguration" %> <% HMaster master = (HMaster)getServletContext().getAttribute(HMaster.MASTER); Configuration conf = master.getConfiguration(); + MetaTableLocator metaTableLocator = new MetaTableLocator(); String fqtn = request.getParameter("name"); - HTable table = new HTable(conf, fqtn); + HTable table = (HTable) master.getConnection().getTable(fqtn); String tableHeader; boolean withReplica = false; if (table.getTableDescriptor().getRegionReplication() > 1) {
