LeonBein commented on a change in pull request #15109:
URL: https://github.com/apache/flink/pull/15109#discussion_r592553073
##########
File path: flink-connectors/flink-connector-hbase/src/test/java/org/apache/flink/connector/hbase/testutil/HBaseTestClusterUtil.java
##########

@@ -0,0 +1,322 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.connector.hbase.testutil;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.MiniHBaseCluster;
+import org.apache.hadoop.hbase.StartMiniClusterOption;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.Delete;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
+import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.w3c.dom.Document;
+import org.w3c.dom.Element;
+import org.w3c.dom.NodeList;
+import org.xml.sax.SAXException;
+
+import javax.xml.parsers.DocumentBuilderFactory;
+import javax.xml.parsers.ParserConfigurationException;
+
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Paths;
+import java.util.Arrays;
+import java.util.List;
+import java.util.UUID;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+
+/** Provides static access to a {@link MiniHBaseCluster} for testing. */
+public class HBaseTestClusterUtil {
+
+    public static final String COLUMN_FAMILY_BASE = "info";
+    public static final String DEFAULT_COLUMN_FAMILY = COLUMN_FAMILY_BASE + 0;
+    public static final String QUALIFIER_BASE = "qualifier";
+    public static final String DEFAULT_QUALIFIER = QUALIFIER_BASE + 0;
+
+    public final String configPath = "config" + UUID.randomUUID() + ".xml";
+    private MiniHBaseCluster cluster;
+    private Configuration hbaseConf;
+    private String testFolder;
+
+    public HBaseTestClusterUtil() {}
+
+    public static void main(String[] args)
+            throws ParserConfigurationException, SAXException, IOException {
+        Arrays.asList(HdfsConstants.class.getDeclaredFields()).forEach(System.out::println);
+        HBaseTestClusterUtil hbaseTestClusterUtil = new HBaseTestClusterUtil();
+        hbaseTestClusterUtil.startCluster();
+        hbaseTestClusterUtil.makeTable("tableName");
+    }
+
+    public void startCluster() throws IOException {
+        System.out.println("Starting HBase test cluster ...");
+        testFolder = Files.createTempDirectory(null).toString();
+
+        // Fallback for windows users with space in user name, will not work if path contains space.
+        if (testFolder.contains(" ")) {
+            testFolder = "/flink-hbase-test-data/";
+        }
+        UserGroupInformation.setLoginUser(UserGroupInformation.createRemoteUser("tempusername"));
+
+        hbaseConf = HBaseConfiguration.create();
+        hbaseConf.setInt("replication.stats.thread.period.seconds", 5);
+        hbaseConf.setLong("replication.sleep.before.failover", 2000);
+        hbaseConf.setInt("replication.source.maxretriesmultiplier", 10);
+        hbaseConf.setBoolean("hbase.replication", true);
+
+        System.setProperty(HBaseTestingUtility.BASE_TEST_DIRECTORY_KEY, testFolder);
+        System.out.println("Testfolder: " + testFolder);
+
+        HBaseTestingUtility utility = new HBaseTestingUtility(hbaseConf);
+        System.out.println(utility.getDataTestDir().toString());
+        try {
+            cluster =
+                    utility.startMiniCluster(
+                            StartMiniClusterOption.builder().numRegionServers(3).build());
+            int numRegionServers = utility.getHBaseCluster().getRegionServerThreads().size();
+            System.out.println(numRegionServers);
+
+            System.out.println(hbaseConf.get("hbase.zookeeper.quorum"));
+            System.out.println(hbaseConf.get("hbase.zookeeper.property.clientPort"));
+            System.out.println(hbaseConf.get("hbase.master.info.port"));
+            System.out.println(hbaseConf.get("hbase.master.port"));
+            System.out.println(hbaseConf.get("hbase.master.info.port"));
+            System.out.println(hbaseConf.get("hbase.master.info.port"));
+
+            cluster.waitForActiveAndReadyMaster(30 * 1000);
+            try {
+                HBaseAdmin.available(hbaseConf);
+                System.out.println("HBase test cluster up and running ...");
+            } catch (IOException e1) {
+                e1.printStackTrace();
+                Throwable e = e1;
+                while (e.getCause() != null) {
+                    e = e.getCause();
+                }
+                e.printStackTrace();
+                System.exit(1);
+            }
+
+            hbaseConf.writeXml(new FileOutputStream(configPath));
+
+        } catch (Exception e) {
+            e.printStackTrace();
+        }
+    }
+
+    public void shutdownCluster()
+            throws IOException, InterruptedException, ExecutionException, TimeoutException {
+        System.out.println("Shutting down HBase test cluster");
+        clearTables();
+        clearReplicationPeers();
+        cluster.shutdown();
+        new File(configPath).delete();
+        CompletableFuture.runAsync(cluster::waitUntilShutDown).get(240, TimeUnit.SECONDS);
+        Paths.get(testFolder).toFile().delete();
+        System.out.println("HBase test cluster shut down");
+    }
+
+    public boolean isClusterAlreadyRunning() throws InterruptedException, ExecutionException {
+        try {
+            return CompletableFuture.supplyAsync(
+                            () -> {
+                                try (Connection connection =
+                                        ConnectionFactory.createConnection(getConfig())) {
+                                    return true;
+                                } catch (ParserConfigurationException
+                                        | IOException
+                                        | SAXException e) {
+                                    e.printStackTrace();
+                                    return false;
+                                }
+                            })
+                    .get(10, TimeUnit.SECONDS);
+        } catch (TimeoutException e) {
+            System.out.println("Trying to connect to HBase test cluster timed out");
+            e.printStackTrace(System.out);
+            return false;
+        }
+    }
+
+    public void clearTables() {
+        try (Admin admin = ConnectionFactory.createConnection(getConfig()).getAdmin()) {
+            for (TableDescriptor table : admin.listTableDescriptors()) {
+                admin.disableTable(table.getTableName());
+                admin.deleteTable(table.getTableName());
+            }
+        } catch (ParserConfigurationException | IOException | SAXException e) {
+            e.printStackTrace();
+        }
+    }
+
+    public void clearReplicationPeers() {
+        try (Admin admin = ConnectionFactory.createConnection(getConfig()).getAdmin()) {
+            for (ReplicationPeerDescription desc : admin.listReplicationPeers()) {
+                System.out.println("==== " + desc.getPeerId() + " ====");
+                System.out.println(desc);
+                admin.removeReplicationPeer(desc.getPeerId());
+            }
+        } catch (SAXException | IOException | ParserConfigurationException e) {
+            e.printStackTrace();
+        }
+    }
+
+    public List<ReplicationPeerDescription> getReplicationPeers() {
+        try (Admin admin = ConnectionFactory.createConnection(getConfig()).getAdmin()) {
+            return admin.listReplicationPeers();
+        } catch (SAXException | IOException | ParserConfigurationException e) {
+            e.printStackTrace();
+            return null;
+        }
+    }
+
+    public void makeTable(String tableName) {
+        makeTable(tableName, 1);
+    }
+
+    /**
+     * Creates a table for given name with given number of column families. Column family names
+     * start with {@link HBaseTestClusterUtil#COLUMN_FAMILY_BASE} and are indexed, if more than one
+     * is requested
+     */
+    public void makeTable(String tableName, int numColumnFamilies) {
+        assert numColumnFamilies >= 1;
+        try (Admin admin = ConnectionFactory.createConnection(getConfig()).getAdmin()) {
+            TableName tableNameObj = TableName.valueOf(tableName);
+            if (!admin.tableExists(tableNameObj)) {
+                TableDescriptorBuilder tableBuilder =
+                        TableDescriptorBuilder.newBuilder(tableNameObj);
+                for (int i = 0; i < numColumnFamilies; i++) {
+                    ColumnFamilyDescriptorBuilder cfBuilder =
+                            ColumnFamilyDescriptorBuilder.newBuilder(
+                                    Bytes.toBytes(COLUMN_FAMILY_BASE + i));
+                    cfBuilder.setScope(1);
+                    tableBuilder.setColumnFamily(cfBuilder.build());
+                }
+                admin.createTable(tableBuilder.build());
+            }
+        } catch (SAXException | IOException | ParserConfigurationException e) {
+            e.printStackTrace();
+        }
+    }
+
+    public void commitPut(String tableName, Put put) {
+        try (Table htable =
+                ConnectionFactory.createConnection(getConfig())
+                        .getTable(TableName.valueOf(tableName))) {
+            htable.put(put);
+            System.out.println("Added row " + Bytes.toString(put.getRow()));
+        } catch (IOException | SAXException | ParserConfigurationException e) {
+            e.printStackTrace();
+        }
+    }
+
+    public String put(String tableName, String value) {
+        try (Table htable =
+                ConnectionFactory.createConnection(getConfig())
+                        .getTable(TableName.valueOf(tableName))) {
+            String uuid = UUID.randomUUID().toString();
+            byte[] rowkey = Bytes.toBytes(uuid);
+            byte[] columnFamily = DEFAULT_COLUMN_FAMILY.getBytes();
+            byte[] qualifier = DEFAULT_QUALIFIER.getBytes();
+            byte[] payload = value.getBytes();
+            Put put = new Put(rowkey).addColumn(columnFamily, qualifier, payload);
+            htable.put(put);
+            System.out.println("Added row " + uuid);
+            return uuid;
+        } catch (IOException | SAXException | ParserConfigurationException e) {
+            e.printStackTrace();
+            return null;
+        }
+    }
+
+    public void delete(String tableName, String rowKey, String columnFamily, String qualifier) {
+        try (Table htable =
+                ConnectionFactory.createConnection(getConfig())
+                        .getTable(TableName.valueOf(tableName))) {
+            Delete delete = new Delete(rowKey.getBytes());
+            delete.addColumn(columnFamily.getBytes(), qualifier.getBytes());
+            htable.delete(delete);
+            System.out.println("Deleted row " + rowKey);
+        } catch (IOException | SAXException | ParserConfigurationException e) {
+            e.printStackTrace();
+        }
+    }
+
+    public void put(String tableName, int numColumnFamilies, String... values) {
+        assert numColumnFamilies >= 1;
+        assert values.length >= numColumnFamilies;
+        try (Table htable =
+                ConnectionFactory.createConnection(getConfig())
+                        .getTable(TableName.valueOf(tableName))) {
+
+            String rowKey = UUID.randomUUID().toString();
+            Put put = new Put(rowKey.getBytes());
+            int index = 0;
+            for (int cf = 0; cf < numColumnFamilies; cf++) {
+                int cq = 0;
+                for (; index + cq < values.length * (cf + 1) / numColumnFamilies; cq++) {
+                    put.addColumn(
+                            (COLUMN_FAMILY_BASE + cf).getBytes(),
+                            (QUALIFIER_BASE + cq).getBytes(),
+                            values[index + cq].getBytes());
+                }
+                index += cq;
+            }
+            htable.put(put);
+            System.out.println("Added row " + rowKey);
+        } catch (IOException | SAXException | ParserConfigurationException e) {
+            e.printStackTrace();
+        }
+    }
+
+    public Configuration getConfig()

Review comment:
   Fixed by 2ca635487e07e0d23e1a094f9251606c8ce6bbc0


----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
[email protected]
