Repository: phoenix
Updated Branches:
  refs/heads/4.0 b48ca7b5c -> 4c0d00bdd
PHOENIX-1170 Change status of local index during splitting to prevent usage when slower than query through data table (Rajeshbabu)

Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/4c0d00bd
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/4c0d00bd
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/4c0d00bd

Branch: refs/heads/4.0
Commit: 4c0d00bdd32e1853e929729f396dda567dc6faeb
Parents: b48ca7b
Author: James Taylor <jtay...@salesforce.com>
Authored: Mon Oct 27 14:29:07 2014 -0700
Committer: James Taylor <jtay...@salesforce.com>
Committed: Mon Oct 27 14:29:07 2014 -0700

----------------------------------------------------------------------
 .../org/apache/phoenix/end2end/QueryIT.java     |  20 ++-
 .../phoenix/end2end/index/LocalIndexIT.java     | 152 ++++++++++++++++---
 .../IndexHalfStoreFileReaderGenerator.java      |  63 ++++++++
 .../hbase/regionserver/LocalIndexSplitter.java  |  40 +++++
 .../java/org/apache/phoenix/query/BaseTest.java |   1 +
 5 files changed, 250 insertions(+), 26 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/4c0d00bd/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryIT.java
index cc431c1..f45b689 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryIT.java
@@ -50,6 +50,7 @@ import java.sql.Timestamp;
 import java.util.Arrays;
 import java.util.Properties;
 
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
@@ -60,6 +61,7 @@ import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.schema.PDataType;
 import org.apache.phoenix.schema.SequenceNotFoundException;
 import org.apache.phoenix.util.ByteUtil;
+import org.apache.phoenix.util.MetaDataUtil;
 import org.apache.phoenix.util.PhoenixRuntime;
 import org.apache.phoenix.util.PropertiesUtil;
 import org.junit.Test;
@@ -761,14 +763,16 @@ public class QueryIT extends BaseQueryIT {
         HTable htable = (HTable) conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(tableName);
         htable.clearRegionCache();
         int nRegions = htable.getRegionLocations().size();
-        admin.split(tableName, ByteUtil.concat(Bytes.toBytes(tenantId), Bytes.toBytes("00A" + Character.valueOf((char) ('3' + nextRunCount())) + ts))); // vary split point with test run
-        int retryCount = 0;
-        do {
-            Thread.sleep(2000);
-            retryCount++;
-            //htable.clearRegionCache();
-        } while (retryCount < 10 && htable.getRegionLocations().size() == nRegions);
-        assertNotEquals(nRegions, htable.getRegionLocations().size());
+        if(!admin.tableExists(TableName.valueOf(MetaDataUtil.getLocalIndexTableName(ATABLE_NAME)))) {
+            admin.split(tableName, ByteUtil.concat(Bytes.toBytes(tenantId), Bytes.toBytes("00A" + Character.valueOf((char) ('3' + nextRunCount())) + ts))); // vary split point with test run
+            int retryCount = 0;
+            do {
+                Thread.sleep(2000);
+                retryCount++;
+                //htable.clearRegionCache();
+            } while (retryCount < 10 && htable.getRegionLocations().size() == nRegions);
+            assertNotEquals(nRegions, htable.getRegionLocations().size());
+        }
 
         statement.setString(1, tenantId);
         rs = statement.executeQuery();
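The tests in this patch assert on the INDEX_STATE column that Phoenix surfaces
through JDBC metadata. As a minimal client-side sketch of that check (the
connection URL, schema, and index names are placeholders, not from the patch):

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;

public class IndexStateCheck {
    public static void main(String[] args) throws Exception {
        Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
        // "INDEX" matches PTableType.INDEX.toString(); INDEX_STATE reads
        // ACTIVE normally and INACTIVE while a data-table split is resolving
        ResultSet rs = conn.getMetaData().getTables(null, "MY_SCHEMA", "MY_INDEX",
                new String[] { "INDEX" });
        while (rs.next()) {
            System.out.println(rs.getString(3) + " -> " + rs.getString("INDEX_STATE"));
        }
        conn.close();
    }
}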
http://git-wip-us.apache.org/repos/asf/phoenix/blob/4c0d00bd/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
index 019e0fb..7fa69d4 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
@@ -24,22 +24,31 @@ import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
+import java.io.IOException;
 import java.sql.Connection;
 import java.sql.DriverManager;
 import java.sql.ResultSet;
 import java.sql.SQLException;
 import java.util.List;
 import java.util.Map;
+import java.util.concurrent.CountDownLatch;
 
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.catalog.CatalogTracker;
+import org.apache.hadoop.hbase.catalog.MetaReader;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.master.HMaster;
+import org.apache.hadoop.hbase.coprocessor.ObserverContext;
+import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
+import org.apache.hadoop.hbase.regionserver.IndexHalfStoreFileReaderGenerator;
+import org.apache.hadoop.hbase.regionserver.LocalIndexSplitter;
+import org.apache.hadoop.hbase.regionserver.Store;
+import org.apache.hadoop.hbase.regionserver.StoreFile;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.phoenix.compile.QueryPlan;
@@ -51,12 +60,15 @@ import org.apache.phoenix.jdbc.PhoenixPreparedStatement;
 import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.schema.PTable;
 import org.apache.phoenix.schema.PTable.IndexType;
+import org.apache.phoenix.schema.PIndexState;
 import org.apache.phoenix.schema.PTableKey;
+import org.apache.phoenix.schema.PTableType;
 import org.apache.phoenix.schema.TableNotFoundException;
 import org.apache.phoenix.util.ByteUtil;
 import org.apache.phoenix.util.MetaDataUtil;
 import org.apache.phoenix.util.QueryUtil;
 import org.apache.phoenix.util.ReadOnlyProps;
+import org.apache.phoenix.util.StringUtil;
 import org.apache.phoenix.util.TestUtil;
 import org.junit.BeforeClass;
 import org.junit.Test;
@@ -65,6 +77,9 @@ import com.google.common.collect.Maps;
 
 public class LocalIndexIT extends BaseIndexIT {
 
+    private static CountDownLatch latch1 = new CountDownLatch(1);
+    private static CountDownLatch latch2 = new CountDownLatch(1);
+
     @BeforeClass
     public static void doSetup() throws Exception {
         Map<String,String> props = Maps.newHashMapWithExpectedSize(3);
@@ -651,25 +666,25 @@ public class LocalIndexIT extends BaseIndexIT {
         HBaseAdmin admin = driver.getConnectionQueryServices(getUrl(), TestUtil.TEST_PROPERTIES).getAdmin();
         for (int i = 1; i < 5; i++) {
+            CatalogTracker ct = new CatalogTracker(admin.getConfiguration());
+            admin.split(Bytes.toBytes(DATA_TABLE_NAME), ByteUtil.concat(Bytes.toBytes(strings[3*i])));
+            List<HRegionInfo> regionsOfUserTable =
+                    MetaReader.getTableRegions(ct, TableName.valueOf(DATA_TABLE_NAME), false);
-            admin.split(Bytes.toBytes(DATA_TABLE_NAME), ByteUtil.concat(Bytes.toBytes(strings[3*i])));
-            List<HRegionInfo> regionsOfUserTable = admin.getTableRegions(TableName.valueOf(DATA_TABLE_NAME));
+            while (regionsOfUserTable.size() != (4+i)) {
+                Thread.sleep(100);
+                regionsOfUserTable = MetaReader.getTableRegions(ct, TableName.valueOf(DATA_TABLE_NAME), false);
+            }
+            assertEquals(4+i, regionsOfUserTable.size());
+            TableName indexTable =
+                    TableName.valueOf(MetaDataUtil.getLocalIndexTableName(DATA_TABLE_NAME));
+            List<HRegionInfo> regionsOfIndexTable =
+                    MetaReader.getTableRegions(ct, indexTable, false);
-            while (regionsOfUserTable.size() != (4+i)) {
-                Thread.sleep(100);
-                regionsOfUserTable = admin.getTableRegions(TableName.valueOf(DATA_TABLE_NAME));
-            }
-            assertEquals(4+i, regionsOfUserTable.size());
-            List<HRegionInfo> regionsOfIndexTable =
-                    admin.getTableRegions(TableName.valueOf(MetaDataUtil
-                            .getLocalIndexTableName(DATA_TABLE_NAME)));
-
-            while (regionsOfIndexTable.size() != (4+i)) {
-                Thread.sleep(100);
-                regionsOfIndexTable =
-                        admin.getTableRegions(TableName.valueOf(MetaDataUtil
-                                .getLocalIndexTableName(DATA_TABLE_NAME)));
-            }
+            while (regionsOfIndexTable.size() != (4 + i)) {
+                Thread.sleep(100);
+                regionsOfIndexTable = MetaReader.getTableRegions(ct, indexTable, false);
+            }
             assertEquals(4 + i, regionsOfIndexTable.size());
             String query = "SELECT t_id,k1,v1 FROM " + DATA_TABLE_NAME;
             rs = conn1.createStatement().executeQuery(query);
@@ -707,4 +722,105 @@ public class LocalIndexIT extends BaseIndexIT {
             conn1.close();
         }
     }
+
+    @Test
+    public void testLocalIndexStateWhenSplittingInProgress() throws Exception {
+        createBaseTable(DATA_TABLE_NAME+"2", null, "('e','j','o')");
+        Connection conn1 = DriverManager.getConnection(getUrl());
+        try{
+            String[] strings = {"a","b","c","d","e","f","g","h","i","j","k","l","m","n","o","p","q","r","s","t","u","v","w","x","y","z"};
+            for (int i = 0; i < 26; i++) {
+                conn1.createStatement().execute(
+                    "UPSERT INTO " + DATA_TABLE_NAME+"2" + " values('"+strings[i]+"'," + i + ","
+                            + (i + 1) + "," + (i + 2) + ",'" + strings[25 - i] + "')");
+            }
+            conn1.commit();
+            conn1.createStatement().execute("CREATE LOCAL INDEX " + INDEX_TABLE_NAME + " ON " + DATA_TABLE_NAME+"2" + "(v1)");
+            conn1.createStatement().execute("CREATE LOCAL INDEX " + INDEX_TABLE_NAME + "_2 ON " + DATA_TABLE_NAME+"2" + "(k3)");
+
+            ResultSet rs = conn1.createStatement().executeQuery("SELECT * FROM " + DATA_TABLE_NAME+"2");
+            assertTrue(rs.next());
+            HBaseAdmin admin = driver.getConnectionQueryServices(getUrl(), TestUtil.TEST_PROPERTIES).getAdmin();
+            HTableDescriptor tableDesc = admin.getTableDescriptor(TableName.valueOf(DATA_TABLE_NAME+"2"));
+            tableDesc.removeCoprocessor(LocalIndexSplitter.class.getName());
+            tableDesc.addCoprocessor(MockedLocalIndexSplitter.class.getName(), null,
+                1, null);
+            admin.disableTable(tableDesc.getTableName());
+            admin.modifyTable(tableDesc.getTableName(), tableDesc);
+            admin.enableTable(tableDesc.getTableName());
+            TableName indexTable =
+                    TableName.valueOf(MetaDataUtil.getLocalIndexTableName(DATA_TABLE_NAME+"2"));
+            HTableDescriptor indexTableDesc = admin.getTableDescriptor(indexTable);
+            indexTableDesc.removeCoprocessor(IndexHalfStoreFileReaderGenerator.class.getName());
+            indexTableDesc.addCoprocessor(MockedIndexHalfStoreFileReaderGenerator.class.getName(), null,
+                1, null);
+            admin.disableTable(indexTable);
+            admin.modifyTable(indexTable, indexTableDesc);
+            admin.enableTable(indexTable);
+
+            admin.split(Bytes.toBytes(DATA_TABLE_NAME+"2"), ByteUtil.concat(Bytes.toBytes(strings[3])));
+            List<HRegionInfo> regionsOfUserTable =
+                    admin.getTableRegions(TableName.valueOf(DATA_TABLE_NAME+"2"));
+
+            while (regionsOfUserTable.size() != 5) {
+                Thread.sleep(100);
+                regionsOfUserTable = admin.getTableRegions(TableName.valueOf(DATA_TABLE_NAME+"2"));
+            }
+            assertEquals(5, regionsOfUserTable.size());
+
+            List<HRegionInfo> regionsOfIndexTable = admin.getTableRegions(indexTable);
+
+            while (regionsOfIndexTable.size() != 5) {
+                Thread.sleep(100);
+                regionsOfIndexTable = admin.getTableRegions(indexTable);
+            }
+
+            assertEquals(5, regionsOfIndexTable.size());
+            latch1.await();
+            // Verify the metadata for index is correct.
+            rs = conn1.getMetaData().getTables(null, StringUtil.escapeLike(SCHEMA_NAME), INDEX_TABLE_NAME,
+                    new String[] { PTableType.INDEX.toString() });
+            assertTrue(rs.next());
+            assertEquals(INDEX_TABLE_NAME, rs.getString(3));
+            assertEquals(PIndexState.INACTIVE.toString(), rs.getString("INDEX_STATE"));
+            assertFalse(rs.next());
+            rs = conn1.getMetaData().getTables(null, StringUtil.escapeLike(SCHEMA_NAME), INDEX_TABLE_NAME+"_2",
+                    new String[] { PTableType.INDEX.toString() });
+            assertTrue(rs.next());
+            assertEquals(INDEX_TABLE_NAME+"_2", rs.getString(3));
+            assertEquals(PIndexState.INACTIVE.toString(), rs.getString("INDEX_STATE"));
+            assertFalse(rs.next());
+
+            String query = "SELECT t_id,k1,v1 FROM " + DATA_TABLE_NAME+"2";
+            rs = conn1.createStatement().executeQuery("EXPLAIN " + query);
+            assertEquals("CLIENT PARALLEL " + 1 + "-WAY FULL SCAN OVER " + DATA_TABLE_NAME+"2",
+                QueryUtil.getExplainPlan(rs));
+            latch2.countDown();
+        } finally {
+            conn1.close();
+            latch1.countDown();
+            latch2.countDown();
+        }
+    }
+
+    public static class MockedIndexHalfStoreFileReaderGenerator extends IndexHalfStoreFileReaderGenerator {
+        @Override
+        public void postCompact(ObserverContext<RegionCoprocessorEnvironment> e, Store store,
+                StoreFile resultFile) throws IOException {
+            try {
+                latch2.await();
+            } catch (InterruptedException e1) {
+            }
+            super.postCompact(e, store, resultFile);
+        }
+    }
+
+    public static class MockedLocalIndexSplitter extends LocalIndexSplitter {
+        @Override
+        public void preSplitAfterPONR(ObserverContext<RegionCoprocessorEnvironment> ctx)
+                throws IOException {
+            super.preSplitAfterPONR(ctx);
+            latch1.countDown();
+        }
+    }
 }
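The EXPLAIN assertion in the test above is the user-visible effect of this
change: while a local index is INACTIVE the optimizer ignores it and falls back
to the data table. A standalone sketch of the same check (URL and table name
are placeholders):

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;

public class ExplainFallbackCheck {
    public static void main(String[] args) throws Exception {
        Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
        // While the local index is INACTIVE, the plan degrades from a range
        // scan over the index to a full scan over the data table.
        ResultSet rs = conn.createStatement().executeQuery(
                "EXPLAIN SELECT t_id, k1, v1 FROM MY_TABLE");
        StringBuilder plan = new StringBuilder();
        while (rs.next()) {
            plan.append(rs.getString(1)).append('\n');
        }
        System.out.println(plan); // e.g. CLIENT PARALLEL 1-WAY FULL SCAN OVER MY_TABLE
        conn.close();
    }
}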
http://git-wip-us.apache.org/repos/asf/phoenix/blob/4c0d00bd/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java
index b04227f..2fbea22 100644
--- a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java
+++ b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.regionserver;
 
 import java.io.IOException;
 import java.sql.SQLException;
+import java.util.Collection;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
@@ -43,9 +44,14 @@ import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.io.Reference;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.regionserver.StoreFile.Reader;
+import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
 import org.apache.phoenix.index.IndexMaintainer;
 import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.parse.AlterIndexStatement;
+import org.apache.phoenix.parse.ParseNodeFactory;
+import org.apache.phoenix.schema.MetaDataClient;
 import org.apache.phoenix.schema.PColumn;
+import org.apache.phoenix.schema.PIndexState;
 import org.apache.phoenix.schema.PTable;
 import org.apache.phoenix.schema.PTableType;
 import org.apache.phoenix.schema.PTable.IndexType;
@@ -57,6 +63,10 @@ import org.apache.phoenix.util.QueryUtil;
 
 public class IndexHalfStoreFileReaderGenerator extends BaseRegionObserver {
 
+    int storeFilesCount = 0;
+    int compactedFilesCount = 0;
+    private static final ParseNodeFactory FACTORY = new ParseNodeFactory();
+
     @Override
     public Reader preStoreFileReaderOpen(ObserverContext<RegionCoprocessorEnvironment> ctx,
             FileSystem fs, Path p, FSDataInputStreamWrapper in, long size, CacheConfig cacheConf,
@@ -126,6 +136,59 @@ public class IndexHalfStoreFileReaderGenerator extends BaseRegionObserver {
         return reader;
     }
 
+    @Override
+    public InternalScanner preCompactScannerOpen(ObserverContext<RegionCoprocessorEnvironment> c,
+            Store store, List<? extends KeyValueScanner> scanners, ScanType scanType,
+            long earliestPutTs, InternalScanner s, CompactionRequest request) throws IOException {
+        InternalScanner internalScanner = super.preCompactScannerOpen(c, store, scanners, scanType, earliestPutTs, s, request);
+        Collection<StoreFile> files = request.getFiles();
+        storeFilesCount = 0;
+        compactedFilesCount = 0;
+        for(StoreFile file:files) {
+            if(!file.isReference()) {
+                return internalScanner;
+            }
+        }
+        storeFilesCount = files.size();
+        return internalScanner;
+    }
+
+    @Override
+    public void postCompact(ObserverContext<RegionCoprocessorEnvironment> e, Store store,
+            StoreFile resultFile) throws IOException {
+        super.postCompact(e, store, resultFile);
+        if(storeFilesCount > 0) compactedFilesCount++;
+        if(compactedFilesCount == storeFilesCount) {
+            PhoenixConnection conn = null;
+            try {
+                conn = QueryUtil.getConnection(e.getEnvironment().getConfiguration()).unwrap(
+                    PhoenixConnection.class);
+                MetaDataClient client = new MetaDataClient(conn);
+                String userTableName = MetaDataUtil.getUserTableName(e.getEnvironment().getRegion().getTableDesc().getNameAsString());
+                PTable dataTable = PhoenixRuntime.getTable(conn, userTableName);
+                List<PTable> indexes = dataTable.getIndexes();
+                for (PTable index : indexes) {
+                    if (index.getIndexType() == IndexType.LOCAL) {
+                        AlterIndexStatement indexStatement = FACTORY.alterIndex(FACTORY.namedTable(null,
+                            org.apache.phoenix.parse.TableName.create(index.getSchemaName().getString(), index.getTableName().getString())),
+                            dataTable.getTableName().getString(), false, PIndexState.ACTIVE);
+                        client.alterIndex(indexStatement);
+                    }
+                }
+                conn.commit();
+            } catch (ClassNotFoundException ex) {
+            } catch (SQLException ex) {
+            } finally {
+                if (conn != null) {
+                    try {
+                        conn.close();
+                    } catch (SQLException ex) {
+                    }
+                }
+            }
+        }
+    }
+
     private byte[][] getViewConstants(PTable dataTable) {
         int dataPosOffset = (dataTable.getBucketNum() != null ? 1 : 0) + (dataTable.isMultiTenant() ? 1 : 0);
         byte[][] viewConstants = null;
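Both this coprocessor and the splitter below reach Phoenix metadata through a
server-side JDBC connection built from the region server's configuration. A
minimal sketch of that pattern in isolation, using the same QueryUtil and
PhoenixConnection calls as the patch (the work done with the connection is
elided):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.util.QueryUtil;

public class ServerSideConnection {
    public static void main(String[] args) throws Exception {
        // In a coprocessor this would be the RegionCoprocessorEnvironment's
        // configuration rather than a freshly created one.
        Configuration conf = HBaseConfiguration.create();
        PhoenixConnection conn =
                QueryUtil.getConnection(conf).unwrap(PhoenixConnection.class);
        try {
            // MetaDataClient calls (e.g. alterIndex) would go here.
        } finally {
            conn.close();
        }
    }
}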
http://git-wip-us.apache.org/repos/asf/phoenix/blob/4c0d00bd/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/LocalIndexSplitter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/LocalIndexSplitter.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/LocalIndexSplitter.java
index acdb353..9ec5d01 100644
--- a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/LocalIndexSplitter.java
+++ b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/LocalIndexSplitter.java
@@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.regionserver;
 
 import java.io.IOException;
+import java.sql.SQLException;
 import java.util.List;
 
 import org.apache.commons.logging.Log;
@@ -35,9 +36,19 @@ import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.regionserver.RegionServerServices;
 import org.apache.hadoop.hbase.regionserver.IndexSplitTransaction;
 import org.apache.hadoop.hbase.util.PairOfSameType;
+import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.parse.AlterIndexStatement;
+import org.apache.phoenix.parse.ParseNodeFactory;
+import org.apache.phoenix.parse.TableName;
+import org.apache.phoenix.schema.MetaDataClient;
 import org.apache.phoenix.schema.PDataType;
+import org.apache.phoenix.schema.PIndexState;
+import org.apache.phoenix.schema.PTable;
+import org.apache.phoenix.schema.PTable.IndexType;
 import org.apache.phoenix.util.IndexUtil;
 import org.apache.phoenix.util.MetaDataUtil;
+import org.apache.phoenix.util.PhoenixRuntime;
+import org.apache.phoenix.util.QueryUtil;
 import org.apache.phoenix.util.SchemaUtil;
 
 public class LocalIndexSplitter extends BaseRegionObserver {
@@ -46,6 +57,7 @@ public class LocalIndexSplitter extends BaseRegionObserver {
 
     private IndexSplitTransaction st = null;
     private PairOfSameType<HRegion> daughterRegions = null;
+    private static final ParseNodeFactory FACTORY = new ParseNodeFactory();
 
     @Override
     public void preSplitBeforePONR(ObserverContext<RegionCoprocessorEnvironment> ctx,
@@ -104,6 +116,34 @@ public class LocalIndexSplitter extends BaseRegionObserver {
             throws IOException {
         if (st == null || daughterRegions == null) return;
         RegionCoprocessorEnvironment environment = ctx.getEnvironment();
+        PhoenixConnection conn = null;
+        try {
+            conn = QueryUtil.getConnection(ctx.getEnvironment().getConfiguration()).unwrap(
+                PhoenixConnection.class);
+            MetaDataClient client = new MetaDataClient(conn);
+            String userTableName = ctx.getEnvironment().getRegion().getTableDesc().getNameAsString();
+            PTable dataTable = PhoenixRuntime.getTable(conn, userTableName);
+            List<PTable> indexes = dataTable.getIndexes();
+            for (PTable index : indexes) {
+                if (index.getIndexType() == IndexType.LOCAL) {
+                    AlterIndexStatement indexStatement = FACTORY.alterIndex(FACTORY.namedTable(null,
+                        TableName.create(index.getSchemaName().getString(), index.getTableName().getString())),
+                        dataTable.getTableName().getString(), false, PIndexState.INACTIVE);
+                    client.alterIndex(indexStatement);
+                }
+            }
+            conn.commit();
+        } catch (ClassNotFoundException ex) {
+        } catch (SQLException ex) {
+        } finally {
+            if (conn != null) {
+                try {
+                    conn.close();
+                } catch (SQLException ex) {
+                }
+            }
+        }
+
         HRegionServer rs = (HRegionServer) environment.getRegionServerServices();
         st.stepsAfterPONR(rs, rs, daughterRegions);
     }
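For reference, the INACTIVE/ACTIVE flips that LocalIndexSplitter and
IndexHalfStoreFileReaderGenerator drive through MetaDataClient correspond to
the state changes exposed at the SQL level by ALTER INDEX (a sketch assuming
the Phoenix 4.x grammar, where UNUSABLE maps to INACTIVE and USABLE back to
ACTIVE; index and table names are placeholders):

import java.sql.Connection;
import java.sql.DriverManager;

public class AlterIndexStateSketch {
    public static void main(String[] args) throws Exception {
        Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
        // Take the local index out of use, then bring it back.
        conn.createStatement().execute("ALTER INDEX MY_INDEX ON MY_TABLE UNUSABLE");
        conn.createStatement().execute("ALTER INDEX MY_INDEX ON MY_TABLE USABLE");
        conn.close();
    }
}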
http://git-wip-us.apache.org/repos/asf/phoenix/blob/4c0d00bd/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java b/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
index e0b0a96..449abd6 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
@@ -634,6 +634,7 @@ public abstract class BaseTest {
         conf.setInt("hbase.hlog.asyncer.number", 2);
         conf.setInt("hbase.assignment.zkevent.workers", 5);
         conf.setInt("hbase.assignment.threads.max", 5);
+        conf.setInt("hbase.catalogjanitor.interval", 5000);
         return conf;
     }