[4/7] git commit: PHOENIX-1315 Optimize query for Pig loader
PHOENIX-1315 Optimize query for Pig loader Conflicts: phoenix-core/src/it/java/org/apache/phoenix/end2end/ReverseScanIT.java phoenix-core/src/it/java/org/apache/phoenix/hbase/index/balancer/IndexLoadBalancerIT.java phoenix-core/src/main/java/org/apache/phoenix/iterate/ConcatResultIterator.java phoenix-pig/src/main/java/org/apache/phoenix/pig/hadoop/PhoenixRecordReader.java Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/ca478a72 Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/ca478a72 Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/ca478a72 Branch: refs/heads/3.0 Commit: ca478a720ee2fe49edd80fa2af7ed819d27876f7 Parents: 29361c6 Author: James Taylor jtay...@salesforce.com Authored: Sun Oct 5 09:53:14 2014 -0700 Committer: James Taylor jtay...@salesforce.com Committed: Mon Oct 6 01:35:24 2014 -0700 -- .../phoenix/end2end/EvaluationOfORIT.java | 9 ++-- ...ipRangeParallelIteratorRegionSplitterIT.java | 5 ++ .../org/apache/phoenix/compile/QueryPlan.java | 3 ++ .../apache/phoenix/execute/AggregatePlan.java | 6 +++ .../phoenix/execute/DegenerateQueryPlan.java| 12 - .../apache/phoenix/execute/HashJoinPlan.java| 5 ++ .../org/apache/phoenix/execute/ScanPlan.java| 8 +++ .../phoenix/iterate/ConcatResultIterator.java | 34 + .../iterate/LookAheadResultIterator.java| 21 .../phoenix/iterate/ParallelIterators.java | 39 -- .../apache/phoenix/jdbc/PhoenixStatement.java | 6 +++ .../phoenix/pig/PhoenixHBaseLoaderIT.java | 24 - .../phoenix/pig/hadoop/PhoenixInputFormat.java | 13 ++--- .../phoenix/pig/hadoop/PhoenixInputSplit.java | 53 ++-- .../phoenix/pig/hadoop/PhoenixRecordReader.java | 25 + 15 files changed, 184 insertions(+), 79 deletions(-) -- http://git-wip-us.apache.org/repos/asf/phoenix/blob/ca478a72/phoenix-core/src/it/java/org/apache/phoenix/end2end/EvaluationOfORIT.java -- diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/EvaluationOfORIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/EvaluationOfORIT.java index 052ff43..0e59542 100644 --- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/EvaluationOfORIT.java +++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/EvaluationOfORIT.java @@ -28,21 +28,22 @@ import java.sql.ResultSet; import java.sql.SQLException; import java.util.Properties; +import org.apache.phoenix.util.PropertiesUtil; import org.junit.Test; import org.junit.experimental.categories.Category; -@Category(BaseHBaseManagedTimeIT.class) +@Category(HBaseManagedTimeTest.class) public class EvaluationOfORIT extends BaseHBaseManagedTimeIT{ @Test public void testPKOrNotPKInOREvaluation() throws SQLException { - Properties props = new Properties(TEST_PROPERTIES); + Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); Connection conn = DriverManager.getConnection(getUrl(), props); conn.setAutoCommit(false); -String create = CREATE TABLE DIE ( ID INTEGER NOT NULL PRIMARY KEY,NAME VARCHAR(50) NOT NULL); +String create = CREATE TABLE DIE ( ID INTEGER NOT NULL PRIMARY KEY,NAME VARCHAR(50)); PreparedStatement createStmt = conn.prepareStatement(create); -createStmt.executeUpdate(); +createStmt.execute(); PreparedStatement stmt = conn.prepareStatement( upsert into + DIE VALUES (?, ?)); http://git-wip-us.apache.org/repos/asf/phoenix/blob/ca478a72/phoenix-core/src/it/java/org/apache/phoenix/end2end/SkipRangeParallelIteratorRegionSplitterIT.java -- diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SkipRangeParallelIteratorRegionSplitterIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SkipRangeParallelIteratorRegionSplitterIT.java index 3d057ae..18d7910 100644 --- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SkipRangeParallelIteratorRegionSplitterIT.java +++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SkipRangeParallelIteratorRegionSplitterIT.java @@ -432,6 +432,11 @@ public class SkipRangeParallelIteratorRegionSplitterIT extends BaseClientManaged public boolean isRowKeyOrdered() { return true; } + +@Override +public List<List<Scan>> getScans() { +return null; +} }, null, new
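The visible core of PHOENIX-1315 is the new QueryPlan.getScans() method, which exposes the plan's parallel scans as List<List<Scan>> so the Pig input format and record reader can hand out work scan-by-scan. The following is a minimal sketch of that split-per-scan idea under that single assumption; Scan, PhoenixInputSplitSketch and generateSplits are simplified, illustrative stand-ins, not the actual PhoenixInputFormat/PhoenixInputSplit code.

    // Sketch only: builds one input split per group of scans returned by a query plan.
    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.List;

    public class SplitPerScanSketch {
        // Simplified stand-in for org.apache.hadoop.hbase.client.Scan.
        static class Scan {
            final String startRow, stopRow;
            Scan(String startRow, String stopRow) { this.startRow = startRow; this.stopRow = stopRow; }
        }
        // Stand-in for an input split carrying the scans one mapper should execute.
        static class PhoenixInputSplitSketch {
            final List<Scan> scans;
            PhoenixInputSplitSketch(List<Scan> scans) { this.scans = scans; }
        }

        // One split per scan group: each record reader then iterates only its own scans
        // instead of re-deriving its share of work from the whole query.
        static List<PhoenixInputSplitSketch> generateSplits(List<List<Scan>> planScans) {
            List<PhoenixInputSplitSketch> splits = new ArrayList<PhoenixInputSplitSketch>(planScans.size());
            for (List<Scan> scans : planScans) {
                splits.add(new PhoenixInputSplitSketch(scans));
            }
            return splits;
        }

        public static void main(String[] args) {
            List<List<Scan>> planScans = new ArrayList<List<Scan>>();
            planScans.add(Arrays.asList(new Scan("a", "c")));
            planScans.add(Arrays.asList(new Scan("c", "e"), new Scan("e", "g")));
            System.out.println(generateSplits(planScans).size() + " splits"); // prints "2 splits"
        }
    }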
[6/7] git commit: PHOENIX-1257 Upserted data seen by SELECT in UPSERT SELECT execution (Lars Hofhansl)
PHOENIX-1257 Upserted data seen by SELECT in UPSERT SELECT execution (Lars Hofhansl) Conflicts: phoenix-core/src/it/java/org/apache/phoenix/end2end/ReverseScanIT.java phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/002a9de6 Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/002a9de6 Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/002a9de6 Branch: refs/heads/3.0 Commit: 002a9de6a8dc6089ecc8bc27372e2424eccb855f Parents: f68f52a Author: James Taylor jtay...@salesforce.com Authored: Sun Oct 5 13:26:52 2014 -0700 Committer: James Taylor jtay...@salesforce.com Committed: Mon Oct 6 01:42:59 2014 -0700 -- .../phoenix/end2end/CoalesceFunctionIT.java | 67 ...ipRangeParallelIteratorRegionSplitterIT.java | 3 +- .../end2end/TenantSpecificTablesDDLIT.java | 2 +- .../phoenix/end2end/ToCharFunctionIT.java | 4 +- .../phoenix/end2end/ToNumberFunctionIT.java | 4 +- .../end2end/UpsertSelectAutoCommitIT.java | 23 +++ .../salted/SaltedTableVarLengthRowKeyIT.java| 8 +-- .../apache/phoenix/compile/FromCompiler.java| 32 +++--- .../apache/phoenix/compile/UpsertCompiler.java | 19 ++ .../apache/phoenix/execute/BaseQueryPlan.java | 6 -- 10 files changed, 103 insertions(+), 65 deletions(-) -- http://git-wip-us.apache.org/repos/asf/phoenix/blob/002a9de6/phoenix-core/src/it/java/org/apache/phoenix/end2end/CoalesceFunctionIT.java -- diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/CoalesceFunctionIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/CoalesceFunctionIT.java index 57599e6..45fcb48 100644 --- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/CoalesceFunctionIT.java +++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/CoalesceFunctionIT.java @@ -67,7 +67,7 @@ public class CoalesceFunctionIT extends BaseHBaseManagedTimeIT { public void coalesceWithSumExplicitLong() throws Exception { Connection conn = DriverManager.getConnection(getUrl()); -String ddl = CREATE TABLE IF NOT EXISTS TEST_COALESCE( +String ddl = CREATE TABLE TEST_COALESCE( + ID BIGINT NOT NULL, + COUNT BIGINT + CONSTRAINT pk PRIMARY KEY(ID)); @@ -91,7 +91,7 @@ public class CoalesceFunctionIT extends BaseHBaseManagedTimeIT { public void coalesceWithSumImplicitLong() throws Exception { Connection conn = DriverManager.getConnection(getUrl()); -String ddl = CREATE TABLE IF NOT EXISTS TEST_COALESCE( +String ddl = CREATE TABLE TEST_COALESCE( + ID BIGINT NOT NULL, + COUNT BIGINT + CONSTRAINT pk PRIMARY KEY(ID)); @@ -115,7 +115,7 @@ public class CoalesceFunctionIT extends BaseHBaseManagedTimeIT { public void coalesceWithSecondParamAsExpression() throws Exception { Connection conn = DriverManager.getConnection(getUrl()); -String ddl = CREATE TABLE IF NOT EXISTS TEST_COALESCE( +String ddl = CREATE TABLE TEST_COALESCE( + ID BIGINT NOT NULL, + COUNT BIGINT + CONSTRAINT pk PRIMARY KEY(ID)); @@ -139,7 +139,7 @@ public class CoalesceFunctionIT extends BaseHBaseManagedTimeIT { public void nonTypedSecondParameterLong() throws Exception { Connection conn = DriverManager.getConnection(getUrl()); -String ddl = CREATE TABLE IF NOT EXISTS TEST_COALESCE( +String ddl = CREATE TABLE TEST_COALESCE( + ID BIGINT NOT NULL, + COUNT BIGINT //first parameter to coalesce + CONSTRAINT pk PRIMARY KEY(ID)); @@ -163,47 +163,32 @@ public class CoalesceFunctionIT extends BaseHBaseManagedTimeIT { public void nonTypedSecondParameterUnsignedDataTypes() throws Exception { Connection conn = 
DriverManager.getConnection(getUrl()); -String[] dataTypes = { -UNSIGNED_INT, -UNSIGNED_LONG, -UNSIGNED_TINYINT, -UNSIGNED_SMALLINT, -UNSIGNED_FLOAT, -UNSIGNED_DOUBLE, -UNSIGNED_TIME, -UNSIGNED_DATE, -UNSIGNED_TIMESTAMP -}; - -for (String dataType : dataTypes) { - -String ddl = CREATE TABLE IF NOT EXISTS TEST_COALESCE( -+ ID BIGINT NOT NULL, -+ COUNT + dataType //first parameter to coalesce -+ CONSTRAINT pk PRIMARY KEY(ID)); -conn.createStatement().execute(ddl); - -
[2/7] git commit: Fix comparison for min time before doing an update stats and set small min in test
Fix comparison for min time before doing an update stats and set small min in test Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/66a8808e Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/66a8808e Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/66a8808e Branch: refs/heads/3.0 Commit: 66a8808e0bb7f168c649bd2079b54e02542dbf03 Parents: b67b0c1 Author: James Taylor jtay...@salesforce.com Authored: Mon Oct 6 01:16:36 2014 -0700 Committer: James Taylor jtay...@salesforce.com Committed: Mon Oct 6 01:16:36 2014 -0700 -- .../apache/phoenix/end2end/ParallelIteratorsIT.java| 5 +++-- .../java/org/apache/phoenix/schema/MetaDataClient.java | 13 ++--- 2 files changed, 9 insertions(+), 9 deletions(-) -- http://git-wip-us.apache.org/repos/asf/phoenix/blob/66a8808e/phoenix-core/src/it/java/org/apache/phoenix/end2end/ParallelIteratorsIT.java -- diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ParallelIteratorsIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ParallelIteratorsIT.java index d256641..8e0866b 100644 --- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ParallelIteratorsIT.java +++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ParallelIteratorsIT.java @@ -65,6 +65,7 @@ public class ParallelIteratorsIT extends BaseHBaseManagedTimeIT { // Must update config before starting server props.put(QueryServices.HISTOGRAM_BYTE_DEPTH_ATTRIB, Long.toString(20)); props.put(QueryServices.DROP_METADATA_ATTRIB, Boolean.toString(true)); +props.put(QueryServices.STATS_UPDATE_FREQ_MS_ATTRIB, Integer.toString(1)); setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator())); } @@ -83,7 +84,7 @@ public class ParallelIteratorsIT extends BaseHBaseManagedTimeIT { pstmt = conn.prepareStatement(SELECT COUNT(*) FROM STABLE).unwrap(PhoenixPreparedStatement.class); pstmt.execute(); keyRanges = getAllSplits(conn); -assertEquals(Unexpected number of splits: + keyRanges, 5, keyRanges.size()); +assertEquals(Unexpected number of splits: + keyRanges, 7, keyRanges.size()); assertEquals(newKeyRange(KeyRange.UNBOUND, KMIN), keyRanges.get(0)); assertEquals(newKeyRange(KMIN, K3), keyRanges.get(1)); assertEquals(newKeyRange(K3, K4), keyRanges.get(2)); @@ -123,7 +124,7 @@ public class ParallelIteratorsIT extends BaseHBaseManagedTimeIT { stmt.execute(); conn.prepareStatement(SELECT COUNT(*) FROM STABLE).executeQuery(); keyRanges = getAllSplits(conn); -assertEquals(4, keyRanges.size()); +assertEquals(7, keyRanges.size()); upsert(conn, new byte[][] { KMIN2, K5, K12 }); stmt = conn.prepareStatement(ANALYZE STABLE); stmt.execute(); http://git-wip-us.apache.org/repos/asf/phoenix/blob/66a8808e/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java -- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java index 36d70b1..3d5f53c 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java @@ -48,7 +48,6 @@ import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SALT_BUCKETS; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SORT_ORDER; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CATALOG_SCHEMA; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CATALOG_TABLE; -import static 
org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_STATS_TABLE; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_NAME; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_SCHEM; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_SEQ_NUM; @@ -474,7 +473,7 @@ public class MetaDataClient { public MutationState updateStatistics(UpdateStatisticsStatement updateStatisticsStmt) throws SQLException { // Check before updating the stats if we have reached the configured time to reupdate the stats once again -long minTimeForStatsUpdate = connection.getQueryServices().getProps() +long msMinBetweenUpdates = connection.getQueryServices().getProps() .getLong(QueryServices.STATS_UPDATE_FREQ_MS_ATTRIB, QueryServicesOptions.DEFAULT_STATS_UPDATE_FREQ_MS); ColumnResolver resolver = FromCompiler.getResolver(updateStatisticsStmt, connection); PTable table =
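The rename above hints at the actual fix, which the 4.0 and master versions of this commit later in the digest show in full: compute the elapsed time since the last stats update and rebuild stats only when that elapsed time is at least the configured minimum. A small sketch of that check, with the surrounding MetaDataClient plumbing omitted and variable names following the diff:

    public class StatsUpdateCheckSketch {
        static boolean shouldUpdateStats(long currentTimeMs, long lastStatsUpdateTimeMs,
                                         long msMinBetweenUpdates) {
            // If stats have never been collected, the elapsed time is treated as unbounded
            // (the diff uses Long.MAX_VALUE), so the update always runs the first time.
            long msSinceLastUpdate = lastStatsUpdateTimeMs == 0
                    ? Long.MAX_VALUE
                    : currentTimeMs - lastStatsUpdateTimeMs;
            // This is the comparison the commit settles on: update only once enough time has passed.
            return msSinceLastUpdate >= msMinBetweenUpdates;
        }

        public static void main(String[] args) {
            long now = System.currentTimeMillis();
            System.out.println(shouldUpdateStats(now, now - 5000, 2000)); // true
            System.out.println(shouldUpdateStats(now, now - 1000, 2000)); // false
        }
    }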
[7/7] git commit: Increase memory and tweak test settings to get tests to pass on Mac
Increase memory and tweak test settings to get tests to pass on Mac Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/006bce10 Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/006bce10 Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/006bce10 Branch: refs/heads/3.0 Commit: 006bce10f5b768bac90f79e5d9e91568c7cbafbf Parents: 002a9de Author: James Taylor jtay...@salesforce.com Authored: Mon Oct 6 09:01:26 2014 -0700 Committer: James Taylor jtay...@salesforce.com Committed: Mon Oct 6 09:01:26 2014 -0700 -- .../it/java/org/apache/phoenix/end2end/ParallelIteratorsIT.java| 1 - .../test/java/org/apache/phoenix/query/QueryServicesTestImpl.java | 2 +- pom.xml| 2 +- 3 files changed, 2 insertions(+), 3 deletions(-) -- http://git-wip-us.apache.org/repos/asf/phoenix/blob/006bce10/phoenix-core/src/it/java/org/apache/phoenix/end2end/ParallelIteratorsIT.java -- diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ParallelIteratorsIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ParallelIteratorsIT.java index 8e0866b..e48a938 100644 --- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ParallelIteratorsIT.java +++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ParallelIteratorsIT.java @@ -65,7 +65,6 @@ public class ParallelIteratorsIT extends BaseHBaseManagedTimeIT { // Must update config before starting server props.put(QueryServices.HISTOGRAM_BYTE_DEPTH_ATTRIB, Long.toString(20)); props.put(QueryServices.DROP_METADATA_ATTRIB, Boolean.toString(true)); -props.put(QueryServices.STATS_UPDATE_FREQ_MS_ATTRIB, Integer.toString(1)); setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator())); } http://git-wip-us.apache.org/repos/asf/phoenix/blob/006bce10/phoenix-core/src/test/java/org/apache/phoenix/query/QueryServicesTestImpl.java -- diff --git a/phoenix-core/src/test/java/org/apache/phoenix/query/QueryServicesTestImpl.java b/phoenix-core/src/test/java/org/apache/phoenix/query/QueryServicesTestImpl.java index 155ba19..54d854f 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/query/QueryServicesTestImpl.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/query/QueryServicesTestImpl.java @@ -52,7 +52,7 @@ public final class QueryServicesTestImpl extends BaseQueryServicesImpl { public static final long DEFAULT_MAX_SERVER_METADATA_CACHE_SIZE = 1024L*1024L*4L; // 4 Mb public static final long DEFAULT_MAX_CLIENT_METADATA_CACHE_SIZE = 1024L*1024L*2L; // 2 Mb public static final long DEFAULT_HISTOGRAM_BYTE_DEPTH = 2000; -public static final int DEFAULT_STATS_UPDATE_FREQ_MS = 2000; +public static final int DEFAULT_STATS_UPDATE_FREQ_MS = 0; public QueryServicesTestImpl(ReadOnlyProps defaultProps) { this(defaultProps, ReadOnlyProps.EMPTY_PROPS); http://git-wip-us.apache.org/repos/asf/phoenix/blob/006bce10/pom.xml -- diff --git a/pom.xml b/pom.xml index bd9e9c2..cfc0cba 100644 --- a/pom.xml +++ b/pom.xml @@ -247,7 +247,7 @@ encodingUTF-8/encoding forkCount${numForkedIT}/forkCount reuseForkstrue/reuseForks - argLine-enableassertions -Xmx2500m -XX:MaxPermSize=128m -Djava.security.egd=file:/dev/./urandom/argLine + argLine-enableassertions -Xmx3000m -XX:MaxPermSize=128m -Djava.security.egd=file:/dev/./urandom/argLine redirectTestOutputToFile${test.output.tofile}/redirectTestOutputToFile testSourceDirectory${basedir}/src/it/java/testSourceDirectory groupsorg.apache.phoenix.end2end.HBaseManagedTimeTest/groups
[3/7] git commit: PHOENIX-1323 Use utility method to get HTableInterface for scans from coprocessor
PHOENIX-1323 Use utility method to get HTableInterface for scans from coprocessor Conflicts: phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/29361c6b Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/29361c6b Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/29361c6b Branch: refs/heads/3.0 Commit: 29361c6b0c0ad06eafd49599c0f7c9a71dff3573 Parents: 66a8808 Author: James Taylor jtay...@salesforce.com Authored: Sat Oct 4 18:12:18 2014 -0700 Committer: James Taylor jtay...@salesforce.com Committed: Mon Oct 6 01:24:37 2014 -0700 -- .../coprocessor/MetaDataEndpointImpl.java | 52 ++-- .../org/apache/phoenix/util/ServerUtil.java | 2 - 2 files changed, 25 insertions(+), 29 deletions(-) -- http://git-wip-us.apache.org/repos/asf/phoenix/blob/29361c6b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java -- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java index cc1117c..7d6317f 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java @@ -467,11 +467,9 @@ public class MetaDataEndpointImpl extends BaseEndpointCoprocessor implements Met } private PTableStats updateStatsInternal(byte[] tableNameBytes) throws IOException { -HTableInterface statsHTable = null; ImmutableBytesWritable ptr = new ImmutableBytesWritable(); +HTableInterface statsHTable = getEnvironment().getTable(PhoenixDatabaseMetaData.SYSTEM_STATS_NAME_BYTES); try { -// Can we do a new HTable instance here? Or get it from a pool or cache of these instances? 
-statsHTable = getEnvironment().getTable(PhoenixDatabaseMetaData.SYSTEM_STATS_NAME_BYTES); Scan s = newTableRowsScan(tableNameBytes); s.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, PhoenixDatabaseMetaData.GUIDE_POSTS_BYTES); ResultScanner scanner = statsHTable.getScanner(s); @@ -519,9 +517,7 @@ public class MetaDataEndpointImpl extends BaseEndpointCoprocessor implements Met throw new IOException(e); } } finally { -if (statsHTable != null) { -statsHTable.close(); -} +statsHTable.close(); } return PTableStatsImpl.NO_STATS; } @@ -700,33 +696,35 @@ public class MetaDataEndpointImpl extends BaseEndpointCoprocessor implements Met scan.setFilter(filter); scan.addColumn(TABLE_FAMILY_BYTES, LINK_TYPE_BYTES); HTableInterface hTable = getEnvironment().getTable(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES); -ResultScanner scanner = hTable.getScanner(scan); - -boolean allViewsInCurrentRegion = true; -int numOfChildViews = 0; -ListResult results = Lists.newArrayList(); try { -for (Result result = scanner.next(); (result != null); result = scanner.next()) { -numOfChildViews++; -ImmutableBytesWritable ptr = new ImmutableBytesWritable(); -ResultTuple resultTuple = new ResultTuple(result); -resultTuple.getKey(ptr); -byte[] key = ptr.copyBytes(); -if (checkTableKeyInRegion(key, region) != null) { -allViewsInCurrentRegion = false; +ResultScanner scanner = hTable.getScanner(scan); + +boolean allViewsInCurrentRegion = true; +int numOfChildViews = 0; +ListResult results = Lists.newArrayList(); +try { +for (Result result = scanner.next(); (result != null); result = scanner.next()) { +numOfChildViews++; +ImmutableBytesWritable ptr = new ImmutableBytesWritable(); +ResultTuple resultTuple = new ResultTuple(result); +resultTuple.getKey(ptr); +byte[] key = ptr.copyBytes(); +if (checkTableKeyInRegion(key, region) != null) { +allViewsInCurrentRegion = false; +} +results.add(result); +} +TableViewFinderResult tableViewFinderResult = new TableViewFinderResult(results); +if (numOfChildViews 0 !allViewsInCurrentRegion) { +
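The restructuring above boils down to a resource-handling pattern: obtain the HTableInterface through the coprocessor environment's utility method before entering the try block, then close it unconditionally in finally. A compact sketch of the pattern, using simplified stand-in types rather than the HBase API itself; the table name and methods are illustrative.

    public class CoprocessorTableSketch {
        // Simplified stand-ins for HTableInterface and the coprocessor environment.
        interface Table { void scan(String startRow, String stopRow); void close(); }
        interface CoprocessorEnv { Table getTable(String tableName); }

        static void readStats(CoprocessorEnv env) {
            // Acquire via the environment's utility method outside the try block, so no
            // null check is needed in finally; this mirrors the restructuring in the diff.
            Table statsTable = env.getTable("SYSTEM.STATS");
            try {
                statsTable.scan("tableA", "tableB"); // e.g. read guidepost rows for one table
            } finally {
                statsTable.close(); // always released, even if the scan throws
            }
        }

        public static void main(String[] args) {
            // Tiny in-memory environment so the sketch actually runs.
            CoprocessorEnv env = new CoprocessorEnv() {
                public Table getTable(final String name) {
                    return new Table() {
                        public void scan(String startRow, String stopRow) {
                            System.out.println("scan " + name + " [" + startRow + "," + stopRow + ")");
                        }
                        public void close() { System.out.println("closed " + name); }
                    };
                }
            };
            readStats(env);
        }
    }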
[5/7] git commit: PHOENIX-1325 Pass in instead of calculate if we've crossed a region boundary in ScanRanges intersect methods
PHOENIX-1325 Pass in instead of calculate if we've crossed a region boundary in ScanRanges intersect methods Conflicts: phoenix-core/src/main/java/org/apache/phoenix/cache/ServerCacheClient.java Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/f68f52a5 Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/f68f52a5 Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/f68f52a5 Branch: refs/heads/3.0 Commit: f68f52a57568ad93b19e3c812772ca11a506b281 Parents: ca478a7 Author: James Taylor jtay...@salesforce.com Authored: Sun Oct 5 10:48:11 2014 -0700 Committer: James Taylor jtay...@salesforce.com Committed: Mon Oct 6 01:37:32 2014 -0700 -- .../apache/phoenix/cache/ServerCacheClient.java | 3 ++- .../org/apache/phoenix/compile/ScanRanges.java | 28 +--- .../phoenix/iterate/ParallelIterators.java | 4 +-- .../compile/ScanRangesIntersectTest.java| 2 +- .../apache/phoenix/compile/ScanRangesTest.java | 2 +- 5 files changed, 24 insertions(+), 15 deletions(-) -- http://git-wip-us.apache.org/repos/asf/phoenix/blob/f68f52a5/phoenix-core/src/main/java/org/apache/phoenix/cache/ServerCacheClient.java -- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/cache/ServerCacheClient.java b/phoenix-core/src/main/java/org/apache/phoenix/cache/ServerCacheClient.java index 267aec9..81b5f63 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/cache/ServerCacheClient.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/cache/ServerCacheClient.java @@ -157,7 +157,8 @@ public class ServerCacheClient { byte[] regionStartKey = entry.getRegionInfo().getStartKey(); byte[] regionEndKey = entry.getRegionInfo().getEndKey(); if ( ! servers.contains(entry) -keyRanges.intersects(regionStartKey, regionEndKey, 0) ) { +keyRanges.intersects(regionStartKey, regionEndKey, 0, true) ) { +// Call RPC once per server servers.add(entry); if (LOG.isDebugEnabled()) {LOG.debug(Adding cache entry to be sent for + entry);} final byte[] key = entry.getRegionInfo().getStartKey(); http://git-wip-us.apache.org/repos/asf/phoenix/blob/f68f52a5/phoenix-core/src/main/java/org/apache/phoenix/compile/ScanRanges.java -- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/ScanRanges.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/ScanRanges.java index 533d752..d5d2280 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/compile/ScanRanges.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/ScanRanges.java @@ -207,7 +207,7 @@ public class ScanRanges { return temp; } -public Scan intersectScan(Scan scan, final byte[] originalStartKey, final byte[] originalStopKey, final int keyOffset) { +public Scan intersectScan(Scan scan, final byte[] originalStartKey, final byte[] originalStopKey, final int keyOffset, boolean crossesRegionBoundary) { byte[] startKey = originalStartKey; byte[] stopKey = originalStopKey; if (stopKey.length 0 Bytes.compareTo(startKey, stopKey) = 0) { @@ -218,16 +218,22 @@ public class ScanRanges { // salt bytes in that case. final int scanKeyOffset = this.isSalted !this.isPointLookup ? SaltingUtil.NUM_SALTING_BYTES : 0; assert (scanKeyOffset == 0 || keyOffset == 0); -// Offset for startKey/stopKey. Either 1 for salted tables or the prefix length -// of the current region for local indexes. +// Total offset for startKey/stopKey. Either 1 for salted tables or the prefix length +// of the current region for local indexes. We'll never have a case where a table is +// both salted and local. 
final int totalKeyOffset = scanKeyOffset + keyOffset; -// In this case, we've crossed the prefix boundary and should consider everything after the startKey -// This prevents us from having to prefix the key prior to knowing whether or not there may be an -// intersection. byte[] prefixBytes = ByteUtil.EMPTY_BYTE_ARRAY; if (totalKeyOffset > 0) { prefixBytes = ScanUtil.getPrefix(startKey, totalKeyOffset); -if (ScanUtil.crossesPrefixBoundary(stopKey, prefixBytes, totalKeyOffset)) { +/* + * If our startKey to stopKey crosses a region boundary consider everything after the startKey as our scan + * is always done within a single region. This prevents us from having to prefix the key
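As far as the truncated hunk shows, the idea behind PHOENIX-1325 is that intersectScan no longer recomputes whether a scan range crosses a region boundary; the caller, which is already walking region boundaries, passes the answer in through the new crossesRegionBoundary parameter. A rough sketch of that division of labour, with simplified stand-ins for ScanRanges and its caller; keys are plain strings and the intersect body is illustrative only.

    import java.util.Arrays;

    public class CrossesBoundarySketch {
        // Stand-in for ScanRanges.intersectScan(scan, startKey, stopKey, keyOffset, crossesRegionBoundary).
        static String intersectScan(String startKey, String stopKey, boolean crossesRegionBoundary) {
            // When the range crosses the region boundary, everything after startKey within this
            // region is in play, so the stop key is left open; otherwise the stop key is kept.
            return crossesRegionBoundary ? "[" + startKey + ", <region end>)"
                                         : "[" + startKey + ", " + stopKey + ")";
        }

        public static void main(String[] args) {
            String[] regionEndKeys = { "b", "d", "f" }; // sorted region end keys
            String startKey = "c", stopKey = "e";
            // The caller locates the region containing startKey once...
            int regionIndex = Arrays.binarySearch(regionEndKeys, startKey);
            if (regionIndex < 0) regionIndex = -regionIndex - 1;
            // ...and therefore already knows whether stopKey spills past that region's end key.
            boolean crossesRegionBoundary = stopKey.compareTo(regionEndKeys[regionIndex]) > 0;
            System.out.println(intersectScan(startKey, stopKey, crossesRegionBoundary));
        }
    }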
Build failed in Jenkins: Phoenix | 3.0 | Hadoop1 #241
See https://builds.apache.org/job/Phoenix-3.0-hadoop1/241/changes Changes: [jtaylor] PHOENIX-1320 Update stats atomically [jtaylor] Fix comparison for min time before doing an update stats and set small min in test [jtaylor] PHOENIX-1323 Use utility method to get HTableInterface for scans from coprocessor [jtaylor] PHOENIX-1315 Optimize query for Pig loader [jtaylor] PHOENIX-1325 Pass in instead of calculate if we've crossed a region boundary in ScanRanges intersect methods [jtaylor] PHOENIX-1257 Upserted data seen by SELECT in UPSERT SELECT execution (Lars Hofhansl) [jtaylor] Increase memory and tweak test settings to get tests to pass on Mac -- [...truncated 9874 lines...] CastAndCoerceITBaseClientManagedTimeIT.dropTables:72 null TopNITBaseClientManagedTimeIT.dropTables:72 null KeyOnlyITBaseClientManagedTimeIT.dropTables:72 null QueryDatabaseMetaDataITBaseClientManagedTimeIT.dropTables:72 null RowValueConstructorITBaseClientManagedTimeIT.dropTables:72 null ColumnProjectionOptimizationITBaseClientManagedTimeIT.dropTables:72 null PercentileITBaseClientManagedTimeIT.dropTables:72 null DistinctCountITBaseClientManagedTimeIT.dropTables:72 null CaseStatementITBaseClientManagedTimeIT.dropTables:72 null ToCharFunctionITBaseClientManagedTimeIT.dropTables:72 null SaltedTableITBaseClientManagedTimeIT.dropTables:72 null UpsertSelectITBaseClientManagedTimeIT.dropTables:72 null StddevITBaseClientManagedTimeIT.dropTables:72 null ProductMetricsITBaseClientManagedTimeIT.dropTables:72 null ReadIsolationLevelITBaseClientManagedTimeIT.dropTables:72 null NotQueryITBaseClientManagedTimeIT.dropTables:72 null CreateTableITBaseClientManagedTimeIT.dropTables:72 null CompareDecimalToLongITBaseClientManagedTimeIT.dropTables:72 null OrderByITBaseClientManagedTimeIT.dropTables:72 null Tests in error: SequenceIT.doSetup:70-BaseTest.setUpTestDriver:476-BaseTest.checkClusterInitialized:455-BaseTest.setUpTestCluster:469-BaseTest.initMiniCluster:519 » Runtime UpsertValuesITBaseClientManagedTimeIT.doSetup:63-BaseTest.setUpTestDriver:476-BaseTest.checkClusterInitialized:455-BaseTest.setUpTestCluster:469-BaseTest.initMiniCluster:519 » Runtime ToNumberFunctionITBaseClientManagedTimeIT.doSetup:63-BaseTest.setUpTestDriver:476-BaseTest.checkClusterInitialized:455-BaseTest.setUpTestCluster:469-BaseTest.initMiniCluster:519 » Runtime ToNumberFunctionIT.tearDownAfterClass:88 » NullPointer Can't set default local... 
IsNullITBaseClientManagedTimeIT.doSetup:63-BaseTest.setUpTestDriver:476-BaseTest.checkClusterInitialized:455-BaseTest.setUpTestCluster:469-BaseTest.initMiniCluster:519 » Runtime NativeHBaseTypesITBaseClientManagedTimeIT.doSetup:63-BaseTest.setUpTestDriver:476-BaseTest.checkClusterInitialized:455-BaseTest.setUpTestCluster:469-BaseTest.initMiniCluster:519 » Runtime InMemoryOrderByIT.doSetup:41-BaseTest.setUpTestDriver:476-BaseTest.checkClusterInitialized:455-BaseTest.setUpTestCluster:469-BaseTest.initMiniCluster:519 » Runtime TenantSpecificTablesDDLITBaseTenantSpecificTablesIT.doSetup:88-BaseTest.setUpTestDriver:476-BaseTest.checkClusterInitialized:455-BaseTest.setUpTestCluster:469-BaseTest.initMiniCluster:519 » Runtime ClientTimeArithmeticQueryITBaseQueryIT.doSetup:84-BaseTest.setUpTestDriver:476-BaseTest.checkClusterInitialized:455-BaseTest.setUpTestCluster:469-BaseTest.initMiniCluster:519 » Runtime TruncateFunctionITBaseClientManagedTimeIT.doSetup:63-BaseTest.setUpTestDriver:476-BaseTest.checkClusterInitialized:455-BaseTest.setUpTestCluster:469-BaseTest.initMiniCluster:519 » Runtime DerivedTableITBaseClientManagedTimeIT.doSetup:63-BaseTest.setUpTestDriver:476-BaseTest.checkClusterInitialized:455-BaseTest.setUpTestCluster:469-BaseTest.initMiniCluster:519 » Runtime QueryITBaseQueryIT.doSetup:84-BaseTest.setUpTestDriver:476-BaseTest.checkClusterInitialized:455-BaseTest.setUpTestCluster:469-BaseTest.initMiniCluster:519 » Runtime GroupByITBaseQueryIT.doSetup:84-BaseTest.setUpTestDriver:476-BaseTest.checkClusterInitialized:455-BaseTest.setUpTestCluster:469-BaseTest.initMiniCluster:519 » Runtime SpooledOrderByIT.doSetup:38-BaseTest.setUpTestDriver:476-BaseTest.checkClusterInitialized:455-BaseTest.setUpTestCluster:469-BaseTest.initMiniCluster:519 » Runtime VariableLengthPKITBaseClientManagedTimeIT.doSetup:63-BaseTest.setUpTestDriver:476-BaseTest.checkClusterInitialized:455-BaseTest.setUpTestCluster:469-BaseTest.initMiniCluster:519 » Runtime DynamicColumnITBaseClientManagedTimeIT.doSetup:63-BaseTest.setUpTestDriver:476-BaseTest.checkClusterInitialized:455-BaseTest.setUpTestCluster:469-BaseTest.initMiniCluster:519 » Runtime ScanQueryITBaseQueryIT.doSetup:84-BaseTest.setUpTestDriver:476-BaseTest.checkClusterInitialized:455-BaseTest.setUpTestCluster:469-BaseTest.initMiniCluster:519 » Runtime
git commit: Fix comparison for min time before doing an update stats and set small min in test Set frequency for stats update to 0 for testing
Repository: phoenix Updated Branches: refs/heads/4.0 e49e8dcfb - e500538e0 Fix comparison for min time before doing an update stats and set small min in test Set frequency for stats update to 0 for testing Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/e500538e Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/e500538e Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/e500538e Branch: refs/heads/4.0 Commit: e500538e021ea3c2cc727abbc24b458890de6a07 Parents: e49e8dc Author: James Taylor jtay...@salesforce.com Authored: Mon Oct 6 01:16:36 2014 -0700 Committer: James Taylor jtay...@salesforce.com Committed: Mon Oct 6 09:16:48 2014 -0700 -- .../apache/phoenix/end2end/ParallelIteratorsIT.java| 1 + .../java/org/apache/phoenix/schema/MetaDataClient.java | 13 ++--- .../apache/phoenix/query/QueryServicesTestImpl.java| 2 ++ 3 files changed, 9 insertions(+), 7 deletions(-) -- http://git-wip-us.apache.org/repos/asf/phoenix/blob/e500538e/phoenix-core/src/it/java/org/apache/phoenix/end2end/ParallelIteratorsIT.java -- diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ParallelIteratorsIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ParallelIteratorsIT.java index 97ca828..72885ab 100644 --- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ParallelIteratorsIT.java +++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ParallelIteratorsIT.java @@ -65,6 +65,7 @@ public class ParallelIteratorsIT extends BaseHBaseManagedTimeIT { // Must update config before starting server props.put(QueryServices.HISTOGRAM_BYTE_DEPTH_ATTRIB, Long.toString(20l)); props.put(QueryServices.DROP_METADATA_ATTRIB, Boolean.toString(true)); +props.put(QueryServices.STATS_UPDATE_FREQ_MS_ATTRIB, Integer.toString(1)); setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator())); } http://git-wip-us.apache.org/repos/asf/phoenix/blob/e500538e/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java -- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java index 09f77ac..469f8fe 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java @@ -49,7 +49,6 @@ import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SALT_BUCKETS; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SORT_ORDER; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CATALOG_SCHEMA; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CATALOG_TABLE; -import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_STATS_TABLE; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_NAME; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_SCHEM; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_SEQ_NUM; @@ -481,7 +480,7 @@ public class MetaDataClient { public MutationState updateStatistics(UpdateStatisticsStatement updateStatisticsStmt) throws SQLException { // Check before updating the stats if we have reached the configured time to reupdate the stats once again -long minTimeForStatsUpdate = connection.getQueryServices().getProps() +long msMinBetweenUpdates = connection.getQueryServices().getProps() .getLong(QueryServices.STATS_UPDATE_FREQ_MS_ATTRIB, QueryServicesOptions.DEFAULT_STATS_UPDATE_FREQ_MS); ColumnResolver 
resolver = FromCompiler.getResolver(updateStatisticsStmt, connection); PTable table = resolver.getTables().get(0).getTable(); @@ -493,15 +492,15 @@ public class MetaDataClient { connection.getQueryServices().clearCacheForTable(tenantIdBytes, Bytes.toBytes(SchemaUtil.getSchemaNameFromFullName(physicalName.getString())), Bytes.toBytes(SchemaUtil.getTableNameFromFullName(physicalName.getString())), clientTS); -String query = SELECT CURRENT_DATE(),+ LAST_STATS_UPDATE_TIME + FROM + SYSTEM_CATALOG_SCHEMA -+ . + SYSTEM_STATS_TABLE + WHERE + PHYSICAL_NAME + =' + physicalName.getString() + ' AND + COLUMN_FAMILY +String query = SELECT CURRENT_DATE(),+ LAST_STATS_UPDATE_TIME + FROM + PhoenixDatabaseMetaData.SYSTEM_STATS_NAME ++ WHERE + PHYSICAL_NAME + =' + physicalName.getString() + ' AND + COLUMN_FAMILY + IS NULL AND + REGION_NAME + IS NULL; ResultSet rs =
git commit: Fix comparison for min time before doing an update stats and set small min in test Set frequency for stats update to 0 for testing
Repository: phoenix Updated Branches: refs/heads/master 166671c89 - 9b98d0a8c Fix comparison for min time before doing an update stats and set small min in test Set frequency for stats update to 0 for testing Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/9b98d0a8 Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/9b98d0a8 Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/9b98d0a8 Branch: refs/heads/master Commit: 9b98d0a8ccde8da79e537e58cdd8e67dc520cad5 Parents: 166671c Author: James Taylor jtay...@salesforce.com Authored: Mon Oct 6 01:16:36 2014 -0700 Committer: James Taylor jtay...@salesforce.com Committed: Mon Oct 6 09:22:39 2014 -0700 -- .../java/org/apache/phoenix/schema/MetaDataClient.java | 13 ++--- .../apache/phoenix/query/QueryServicesTestImpl.java| 2 ++ 2 files changed, 8 insertions(+), 7 deletions(-) -- http://git-wip-us.apache.org/repos/asf/phoenix/blob/9b98d0a8/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java -- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java index 09f77ac..469f8fe 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java @@ -49,7 +49,6 @@ import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SALT_BUCKETS; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SORT_ORDER; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CATALOG_SCHEMA; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CATALOG_TABLE; -import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_STATS_TABLE; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_NAME; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_SCHEM; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_SEQ_NUM; @@ -481,7 +480,7 @@ public class MetaDataClient { public MutationState updateStatistics(UpdateStatisticsStatement updateStatisticsStmt) throws SQLException { // Check before updating the stats if we have reached the configured time to reupdate the stats once again -long minTimeForStatsUpdate = connection.getQueryServices().getProps() +long msMinBetweenUpdates = connection.getQueryServices().getProps() .getLong(QueryServices.STATS_UPDATE_FREQ_MS_ATTRIB, QueryServicesOptions.DEFAULT_STATS_UPDATE_FREQ_MS); ColumnResolver resolver = FromCompiler.getResolver(updateStatisticsStmt, connection); PTable table = resolver.getTables().get(0).getTable(); @@ -493,15 +492,15 @@ public class MetaDataClient { connection.getQueryServices().clearCacheForTable(tenantIdBytes, Bytes.toBytes(SchemaUtil.getSchemaNameFromFullName(physicalName.getString())), Bytes.toBytes(SchemaUtil.getTableNameFromFullName(physicalName.getString())), clientTS); -String query = SELECT CURRENT_DATE(),+ LAST_STATS_UPDATE_TIME + FROM + SYSTEM_CATALOG_SCHEMA -+ . 
+ SYSTEM_STATS_TABLE + WHERE + PHYSICAL_NAME + =' + physicalName.getString() + ' AND + COLUMN_FAMILY +String query = SELECT CURRENT_DATE(),+ LAST_STATS_UPDATE_TIME + FROM + PhoenixDatabaseMetaData.SYSTEM_STATS_NAME ++ WHERE + PHYSICAL_NAME + =' + physicalName.getString() + ' AND + COLUMN_FAMILY + IS NULL AND + REGION_NAME + IS NULL; ResultSet rs = connection.createStatement().executeQuery(query); -long lastUpdatedTime = 0; +long msSinceLastUpdate = Long.MAX_VALUE; if (rs.next() rs.getDate(2) != null) { -lastUpdatedTime = rs.getDate(1).getTime() - rs.getDate(2).getTime(); +msSinceLastUpdate = rs.getDate(1).getTime() - rs.getDate(2).getTime(); } -if (minTimeForStatsUpdate lastUpdatedTime) { +if (msSinceLastUpdate = msMinBetweenUpdates) { // Here create the select query. String countQuery = SELECT /*+ NO_CACHE */ count(*) FROM + table.getName().getString(); PhoenixStatement statement = (PhoenixStatement) connection.createStatement(); http://git-wip-us.apache.org/repos/asf/phoenix/blob/9b98d0a8/phoenix-core/src/test/java/org/apache/phoenix/query/QueryServicesTestImpl.java -- diff --git a/phoenix-core/src/test/java/org/apache/phoenix/query/QueryServicesTestImpl.java b/phoenix-core/src/test/java/org/apache/phoenix/query/QueryServicesTestImpl.java index
git commit: Fix constant declaration for better readability
Repository: phoenix Updated Branches: refs/heads/master 9b98d0a8c - 719eaf07a Fix constant declaration for better readability Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/719eaf07 Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/719eaf07 Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/719eaf07 Branch: refs/heads/master Commit: 719eaf07aea4b79b2b322ee2064dc54835adf4e2 Parents: 9b98d0a Author: James Taylor jtay...@salesforce.com Authored: Mon Oct 6 09:25:08 2014 -0700 Committer: James Taylor jtay...@salesforce.com Committed: Mon Oct 6 09:25:08 2014 -0700 -- .../it/java/org/apache/phoenix/end2end/ParallelIteratorsIT.java| 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) -- http://git-wip-us.apache.org/repos/asf/phoenix/blob/719eaf07/phoenix-core/src/it/java/org/apache/phoenix/end2end/ParallelIteratorsIT.java -- diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ParallelIteratorsIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ParallelIteratorsIT.java index 97ca828..e48a938 100644 --- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ParallelIteratorsIT.java +++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ParallelIteratorsIT.java @@ -63,7 +63,7 @@ public class ParallelIteratorsIT extends BaseHBaseManagedTimeIT { public static void doSetup() throws Exception { MapString,String props = Maps.newHashMapWithExpectedSize(3); // Must update config before starting server -props.put(QueryServices.HISTOGRAM_BYTE_DEPTH_ATTRIB, Long.toString(20l)); +props.put(QueryServices.HISTOGRAM_BYTE_DEPTH_ATTRIB, Long.toString(20)); props.put(QueryServices.DROP_METADATA_ATTRIB, Boolean.toString(true)); setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator())); }
git commit: Fix constant declaration for better readability
Repository: phoenix Updated Branches: refs/heads/4.0 e500538e0 - 926155246 Fix constant declaration for better readability Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/92615524 Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/92615524 Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/92615524 Branch: refs/heads/4.0 Commit: 926155246741dcdc89b8d904a8d391387d3dc0b7 Parents: e500538 Author: James Taylor jtay...@salesforce.com Authored: Mon Oct 6 09:25:08 2014 -0700 Committer: James Taylor jtay...@salesforce.com Committed: Mon Oct 6 09:27:23 2014 -0700 -- .../it/java/org/apache/phoenix/end2end/ParallelIteratorsIT.java | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) -- http://git-wip-us.apache.org/repos/asf/phoenix/blob/92615524/phoenix-core/src/it/java/org/apache/phoenix/end2end/ParallelIteratorsIT.java -- diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ParallelIteratorsIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ParallelIteratorsIT.java index 72885ab..e48a938 100644 --- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ParallelIteratorsIT.java +++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ParallelIteratorsIT.java @@ -63,9 +63,8 @@ public class ParallelIteratorsIT extends BaseHBaseManagedTimeIT { public static void doSetup() throws Exception { MapString,String props = Maps.newHashMapWithExpectedSize(3); // Must update config before starting server -props.put(QueryServices.HISTOGRAM_BYTE_DEPTH_ATTRIB, Long.toString(20l)); +props.put(QueryServices.HISTOGRAM_BYTE_DEPTH_ATTRIB, Long.toString(20)); props.put(QueryServices.DROP_METADATA_ATTRIB, Boolean.toString(true)); -props.put(QueryServices.STATS_UPDATE_FREQ_MS_ATTRIB, Integer.toString(1)); setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator())); }
Build failed in Jenkins: Phoenix | 4.0 | Hadoop1 #354
See https://builds.apache.org/job/Phoenix-4.0-hadoop1/354/changes Changes: [jtaylor] Fix comparison for min time before doing an update stats and set small min in test -- [...truncated 500 lines...] Running org.apache.phoenix.end2end.CaseStatementIT Tests run: 17, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 10.094 sec - in org.apache.phoenix.end2end.PercentileIT Running org.apache.phoenix.end2end.ToCharFunctionIT Tests run: 3, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 9.671 sec - in org.apache.phoenix.end2end.ColumnProjectionOptimizationIT Running org.apache.phoenix.end2end.salted.SaltedTableIT Tests run: 10, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 3.355 sec - in org.apache.phoenix.end2end.ToCharFunctionIT Running org.apache.phoenix.end2end.UpsertSelectIT Tests run: 10, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 9.801 sec - in org.apache.phoenix.end2end.DistinctCountIT Running org.apache.phoenix.end2end.StddevIT Tests run: 8, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 5.27 sec - in org.apache.phoenix.end2end.salted.SaltedTableIT Running org.apache.phoenix.end2end.ProductMetricsIT Tests run: 4, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 4.24 sec - in org.apache.phoenix.end2end.StddevIT Running org.apache.phoenix.end2end.NotQueryIT Tests run: 12, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 23.635 sec - in org.apache.phoenix.end2end.UpsertSelectIT Running org.apache.phoenix.end2end.ReadIsolationLevelIT Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 2.119 sec - in org.apache.phoenix.end2end.ReadIsolationLevelIT Running org.apache.phoenix.end2end.CreateTableIT Tests run: 61, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 27.201 sec - in org.apache.phoenix.end2end.ProductMetricsIT Running org.apache.phoenix.end2end.CompareDecimalToLongIT Tests run: 182, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 175.42 sec - in org.apache.phoenix.end2end.QueryIT Running org.apache.phoenix.end2end.OrderByIT Tests run: 8, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 2.61 sec - in org.apache.phoenix.end2end.CompareDecimalToLongIT Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 2.858 sec - in org.apache.phoenix.end2end.OrderByIT Tests run: 63, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 50.879 sec - in org.apache.phoenix.end2end.CaseStatementIT Tests run: 11, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 21.951 sec - in org.apache.phoenix.end2end.CreateTableIT Tests run: 77, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 63.428 sec - in org.apache.phoenix.end2end.NotQueryIT Results : Tests run: 1266, Failures: 0, Errors: 0, Skipped: 8 [INFO] [INFO] --- maven-failsafe-plugin:2.17:integration-test (HBaseManagedTimeTests) @ phoenix-core --- [INFO] Failsafe report directory: https://builds.apache.org/job/Phoenix-4.0-hadoop1/ws/phoenix-core/target/failsafe-reports [INFO] parallel='none', perCoreThreadCount=true, threadCount=0, useUnlimitedThreads=false, threadCountSuites=0, threadCountClasses=0, threadCountMethods=0, parallelOptimized=true --- T E S T S --- --- T E S T S --- Running org.apache.phoenix.end2end.UpsertSelectAutoCommitIT Running org.apache.phoenix.end2end.BinaryRowKeyIT Running org.apache.phoenix.trace.PhoenixTraceReaderIT Running org.apache.phoenix.trace.PhoenixTracingEndToEndIT Running org.apache.phoenix.trace.PhoenixTableMetricsWriterIT Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 1.537 sec - in org.apache.phoenix.trace.PhoenixTableMetricsWriterIT Tests run: 2, Failures: 0, 
Errors: 0, Skipped: 0, Time elapsed: 2.276 sec - in org.apache.phoenix.trace.PhoenixTraceReaderIT Running org.apache.phoenix.end2end.DynamicFamilyIT Tests run: 3, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 2.993 sec - in org.apache.phoenix.end2end.BinaryRowKeyIT Running org.apache.phoenix.end2end.SortOrderFIT Running org.apache.phoenix.end2end.ReverseScanIT Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 3.687 sec - in org.apache.phoenix.end2end.ReverseScanIT Running org.apache.phoenix.end2end.MD5FunctionIT Tests run: 4, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 5.414 sec - in org.apache.phoenix.end2end.DynamicFamilyIT Running org.apache.phoenix.end2end.SkipScanQueryIT Tests run: 3, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 11.621 sec - in org.apache.phoenix.end2end.UpsertSelectAutoCommitIT Running org.apache.phoenix.end2end.RoundFloorCeilFunctionsEnd2EndIT Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 4.424 sec - in org.apache.phoenix.end2end.MD5FunctionIT Running org.apache.phoenix.end2end.AlterTableIT Tests run: 30, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 9.23 sec - in
Build failed in Jenkins: Phoenix | 4.0 | Hadoop1 #355
See https://builds.apache.org/job/Phoenix-4.0-hadoop1/355/changes Changes: [jtaylor] Fix constant declaration for better readability -- [...truncated 500 lines...] Running org.apache.phoenix.end2end.CaseStatementIT Tests run: 3, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 9.428 sec - in org.apache.phoenix.end2end.ColumnProjectionOptimizationIT Running org.apache.phoenix.end2end.ToCharFunctionIT Tests run: 17, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 10.054 sec - in org.apache.phoenix.end2end.PercentileIT Running org.apache.phoenix.end2end.salted.SaltedTableIT Tests run: 10, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 3.615 sec - in org.apache.phoenix.end2end.ToCharFunctionIT Running org.apache.phoenix.end2end.UpsertSelectIT Tests run: 8, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 4.837 sec - in org.apache.phoenix.end2end.salted.SaltedTableIT Running org.apache.phoenix.end2end.StddevIT Tests run: 10, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 8.818 sec - in org.apache.phoenix.end2end.DistinctCountIT Running org.apache.phoenix.end2end.ProductMetricsIT Tests run: 4, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 3.558 sec - in org.apache.phoenix.end2end.StddevIT Running org.apache.phoenix.end2end.NotQueryIT Tests run: 12, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 23.002 sec - in org.apache.phoenix.end2end.UpsertSelectIT Running org.apache.phoenix.end2end.ReadIsolationLevelIT Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 2.678 sec - in org.apache.phoenix.end2end.ReadIsolationLevelIT Running org.apache.phoenix.end2end.CreateTableIT Tests run: 61, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 27.933 sec - in org.apache.phoenix.end2end.ProductMetricsIT Running org.apache.phoenix.end2end.CompareDecimalToLongIT Tests run: 182, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 175.505 sec - in org.apache.phoenix.end2end.QueryIT Running org.apache.phoenix.end2end.OrderByIT Tests run: 8, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 3.012 sec - in org.apache.phoenix.end2end.CompareDecimalToLongIT Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 3.279 sec - in org.apache.phoenix.end2end.OrderByIT Tests run: 11, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 21.459 sec - in org.apache.phoenix.end2end.CreateTableIT Tests run: 63, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 56.979 sec - in org.apache.phoenix.end2end.CaseStatementIT Tests run: 77, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 59.728 sec - in org.apache.phoenix.end2end.NotQueryIT Results : Tests run: 1266, Failures: 0, Errors: 0, Skipped: 8 [INFO] [INFO] --- maven-failsafe-plugin:2.17:integration-test (HBaseManagedTimeTests) @ phoenix-core --- [INFO] Failsafe report directory: https://builds.apache.org/job/Phoenix-4.0-hadoop1/ws/phoenix-core/target/failsafe-reports [INFO] parallel='none', perCoreThreadCount=true, threadCount=0, useUnlimitedThreads=false, threadCountSuites=0, threadCountClasses=0, threadCountMethods=0, parallelOptimized=true --- T E S T S --- --- T E S T S --- Running org.apache.phoenix.end2end.BinaryRowKeyIT Running org.apache.phoenix.trace.PhoenixTableMetricsWriterIT Running org.apache.phoenix.end2end.UpsertSelectAutoCommitIT Running org.apache.phoenix.trace.PhoenixTraceReaderIT Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 1.605 sec - in org.apache.phoenix.trace.PhoenixTableMetricsWriterIT Running org.apache.phoenix.trace.PhoenixTracingEndToEndIT Running org.apache.phoenix.end2end.DynamicFamilyIT Tests 
run: 3, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 2.897 sec - in org.apache.phoenix.end2end.BinaryRowKeyIT Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 1.775 sec - in org.apache.phoenix.trace.PhoenixTraceReaderIT Running org.apache.phoenix.end2end.SortOrderFIT Running org.apache.phoenix.end2end.ReverseScanIT Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 4.168 sec - in org.apache.phoenix.end2end.ReverseScanIT Running org.apache.phoenix.end2end.MD5FunctionIT Tests run: 4, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 6.151 sec - in org.apache.phoenix.end2end.DynamicFamilyIT Running org.apache.phoenix.end2end.SkipScanQueryIT Tests run: 30, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 9.573 sec - in org.apache.phoenix.end2end.SortOrderFIT Running org.apache.phoenix.end2end.RoundFloorCeilFunctionsEnd2EndIT Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 5.582 sec - in org.apache.phoenix.end2end.MD5FunctionIT Running org.apache.phoenix.end2end.AlterTableIT Tests run: 3, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 14.095 sec - in
Build failed in Jenkins: Phoenix | Master | Hadoop1 #401
See https://builds.apache.org/job/Phoenix-master-hadoop1/401/changes Changes: [jtaylor] Fix comparison for min time before doing an update stats and set small min in test [jtaylor] Fix constant declaration for better readability -- [...truncated 502 lines...] Running org.apache.phoenix.end2end.ArrayIT Tests run: 11, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 22.898 sec - in org.apache.phoenix.end2end.CreateTableIT Running org.apache.phoenix.end2end.TruncateFunctionIT Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 1.804 sec - in org.apache.phoenix.end2end.TruncateFunctionIT Running org.apache.phoenix.end2end.RowValueConstructorIT Tests run: 48, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 24.24 sec - in org.apache.phoenix.end2end.ArrayIT Running org.apache.phoenix.end2end.StddevIT Tests run: 4, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 3.666 sec - in org.apache.phoenix.end2end.StddevIT Running org.apache.phoenix.end2end.MultiCfQueryExecIT Tests run: 7, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 10.366 sec - in org.apache.phoenix.end2end.MultiCfQueryExecIT Running org.apache.phoenix.end2end.NotQueryIT Tests run: 34, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 27.158 sec - in org.apache.phoenix.end2end.RowValueConstructorIT Running org.apache.phoenix.end2end.IsNullIT Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 4.423 sec - in org.apache.phoenix.end2end.IsNullIT Running org.apache.phoenix.end2end.PercentileIT Tests run: 17, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 11.06 sec - in org.apache.phoenix.end2end.PercentileIT Running org.apache.phoenix.end2end.DistinctCountIT Tests run: 10, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 10.452 sec - in org.apache.phoenix.end2end.DistinctCountIT Running org.apache.phoenix.end2end.InMemoryOrderByIT Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 3.031 sec - in org.apache.phoenix.end2end.InMemoryOrderByIT Running org.apache.phoenix.end2end.SkipRangeParallelIteratorRegionSplitterIT Tests run: 8, Failures: 0, Errors: 0, Skipped: 8, Time elapsed: 0.117 sec - in org.apache.phoenix.end2end.SkipRangeParallelIteratorRegionSplitterIT Tests run: 119, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 101.292 sec - in org.apache.phoenix.end2end.ScanQueryIT Tests run: 77, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 60.218 sec - in org.apache.phoenix.end2end.NotQueryIT Tests run: 203, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 178.737 sec - in org.apache.phoenix.end2end.ClientTimeArithmeticQueryIT Tests run: 182, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 209.562 sec - in org.apache.phoenix.end2end.QueryIT Results : Tests run: 1266, Failures: 0, Errors: 0, Skipped: 8 [INFO] [INFO] --- maven-failsafe-plugin:2.17:integration-test (HBaseManagedTimeTests) @ phoenix-core --- [INFO] Failsafe report directory: https://builds.apache.org/job/Phoenix-master-hadoop1/ws/phoenix-core/target/failsafe-reports [INFO] parallel='none', perCoreThreadCount=true, threadCount=0, useUnlimitedThreads=false, threadCountSuites=0, threadCountClasses=0, threadCountMethods=0, parallelOptimized=true --- T E S T S --- --- T E S T S --- Running org.apache.phoenix.end2end.SpillableGroupByIT Running org.apache.phoenix.trace.PhoenixTraceReaderIT Running org.apache.phoenix.trace.PhoenixTableMetricsWriterIT Running org.apache.phoenix.trace.PhoenixTracingEndToEndIT Running org.apache.phoenix.end2end.EncodeFunctionIT Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 1.709 sec 
- in org.apache.phoenix.trace.PhoenixTableMetricsWriterIT Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 1.9 sec - in org.apache.phoenix.trace.PhoenixTraceReaderIT Running org.apache.phoenix.end2end.CSVCommonsLoaderIT Running org.apache.phoenix.end2end.ParallelIteratorsIT Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 5.181 sec - in org.apache.phoenix.end2end.SpillableGroupByIT Running org.apache.phoenix.end2end.SkipScanAfterManualSplitIT Tests run: 6, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 9.248 sec - in org.apache.phoenix.end2end.EncodeFunctionIT Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 7.934 sec - in org.apache.phoenix.end2end.ParallelIteratorsIT Running org.apache.phoenix.end2end.TenantSpecificViewIndexSaltedIT Running org.apache.phoenix.end2end.EvaluationOfORIT Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 1.851 sec - in org.apache.phoenix.end2end.EvaluationOfORIT Running org.apache.phoenix.end2end.index.SaltedIndexIT Tests run: 14, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 18.329 sec - in org.apache.phoenix.end2end.CSVCommonsLoaderIT Running
[3/3] git commit: PHOENIX-167 Support semi/anti-joins
PHOENIX-167 Support semi/anti-joins Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/909d9759 Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/909d9759 Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/909d9759 Branch: refs/heads/master Commit: 909d975960f4592e75fb3fdb6c2d0cecd2d51f1d Parents: 719eaf0 Author: maryannxue maryann...@apache.org Authored: Mon Oct 6 13:29:07 2014 -0400 Committer: maryannxue maryann...@apache.org Committed: Mon Oct 6 13:29:07 2014 -0400 -- .../org/apache/phoenix/end2end/HashJoinIT.java | 433 +++--- .../org/apache/phoenix/end2end/SubqueryIT.java | 810 +++ phoenix-core/src/main/antlr3/PhoenixSQL.g | 2 +- .../phoenix/compile/ExpressionCompiler.java | 25 +- .../apache/phoenix/compile/JoinCompiler.java| 135 ++-- .../apache/phoenix/compile/QueryCompiler.java | 37 +- .../phoenix/compile/StatementNormalizer.java| 3 +- .../phoenix/compile/SubqueryRewriter.java | 401 + .../apache/phoenix/compile/UpsertCompiler.java | 5 + .../apache/phoenix/compile/WhereOptimizer.java | 15 +- .../coprocessor/HashJoinRegionScanner.java | 18 +- .../phoenix/exception/SQLExceptionCode.java | 3 +- .../apache/phoenix/execute/HashJoinPlan.java| 44 +- .../apache/phoenix/jdbc/PhoenixStatement.java | 6 + .../phoenix/parse/BooleanParseNodeVisitor.java | 10 + .../apache/phoenix/parse/ExistsParseNode.java | 6 +- .../org/apache/phoenix/parse/JoinTableNode.java | 10 +- .../apache/phoenix/parse/ParseNodeFactory.java | 17 +- .../apache/phoenix/parse/ParseNodeRewriter.java | 18 +- .../apache/phoenix/parse/ParseNodeVisitor.java | 3 + .../StatelessTraverseAllParseNodeVisitor.java | 9 + .../parse/TraverseAllParseNodeVisitor.java | 40 +- .../parse/TraverseNoParseNodeVisitor.java | 10 + .../parse/UnsupportedAllParseNodeVisitor.java | 25 + .../phoenix/compile/JoinQueryCompilerTest.java | 13 +- .../java/org/apache/phoenix/query/BaseTest.java | 8 + .../java/org/apache/phoenix/util/TestUtil.java | 3 + 27 files changed, 1644 insertions(+), 465 deletions(-) -- http://git-wip-us.apache.org/repos/asf/phoenix/blob/909d9759/phoenix-core/src/it/java/org/apache/phoenix/end2end/HashJoinIT.java -- diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/HashJoinIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/HashJoinIT.java index ceba009..8e35216 100644 --- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/HashJoinIT.java +++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/HashJoinIT.java @@ -119,8 +119,7 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT { SERVER AGGREGATE INTO DISTINCT ROWS BY [I.NAME]\n + CLIENT MERGE SORT\n + CLIENT SORTED BY [I.NAME]\n + -PARALLEL EQUI/SEMI/ANTI-JOIN 1 TABLES:\n + -BUILD HASH TABLE 0\n + +PARALLEL LEFT-JOIN TABLE 0\n + CLIENT PARALLEL 1-WAY FULL SCAN OVER + JOIN_ITEM_TABLE_DISPLAY_NAME, /* * testLeftJoinWithAggregation() @@ -132,8 +131,7 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT { SERVER AGGREGATE INTO DISTINCT ROWS BY [I.item_id]\n + CLIENT MERGE SORT\n + CLIENT SORTED BY [SUM(O.QUANTITY) DESC]\n + -PARALLEL EQUI/SEMI/ANTI-JOIN 1 TABLES:\n + -BUILD HASH TABLE 0\n + +PARALLEL LEFT-JOIN TABLE 0\n + CLIENT PARALLEL 1-WAY FULL SCAN OVER + JOIN_ITEM_TABLE_DISPLAY_NAME + \n + SERVER FILTER BY FIRST KEY ONLY, /* @@ -147,8 +145,7 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT { SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY [I.item_id]\n + CLIENT MERGE SORT\n + CLIENT SORTED BY [SUM(O.QUANTITY) DESC NULLS LAST, I.item_id]\n + -PARALLEL 
EQUI/SEMI/ANTI-JOIN 1 TABLES:\n + -BUILD HASH TABLE 0\n + +PARALLEL LEFT-JOIN TABLE 0\n + CLIENT PARALLEL 1-WAY FULL SCAN OVER + JOIN_ORDER_TABLE_DISPLAY_NAME, /* * testRightJoinWithAggregation() @@ -160,8 +157,7 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT { SERVER AGGREGATE INTO DISTINCT ROWS BY [I.NAME]\n + CLIENT MERGE SORT\n + CLIENT SORTED BY [I.NAME]\n + -PARALLEL EQUI/SEMI/ANTI-JOIN 1 TABLES:\n + -BUILD HASH TABLE
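The hunks above change the expected EXPLAIN output in HashJoinIT: the generic "PARALLEL EQUI/SEMI/ANTI-JOIN n TABLES" / "BUILD HASH TABLE i" pair is replaced by one line per joined table that names the join type, e.g. "PARALLEL LEFT-JOIN TABLE 0". A minimal sketch of inspecting such a plan over JDBC; the connection URL and the ORDERS/ITEMS table names are placeholders, and the exact plan text depends on the schema and data:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;

import org.apache.phoenix.util.QueryUtil;

public class ExplainJoinPlan {
    public static void main(String[] args) throws Exception {
        // "jdbc:phoenix:localhost" is an assumed quorum; adjust for your cluster.
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
            ResultSet rs = conn.createStatement().executeQuery(
                    "EXPLAIN SELECT i.name, SUM(o.quantity) FROM ORDERS o "
                  + "LEFT JOIN ITEMS i ON o.item_id = i.item_id GROUP BY i.name");
            // After this change each joined table is expected to show up as its own
            // "PARALLEL <TYPE>-JOIN TABLE n" line in the plan.
            System.out.println(QueryUtil.getExplainPlan(rs));
        }
    }
}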
[2/3] PHOENIX-167 Support semi/anti-joins
http://git-wip-us.apache.org/repos/asf/phoenix/blob/909d9759/phoenix-core/src/it/java/org/apache/phoenix/end2end/SubqueryIT.java -- diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SubqueryIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SubqueryIT.java new file mode 100644 index 000..3aecd29 --- /dev/null +++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SubqueryIT.java @@ -0,0 +1,810 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * License); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.phoenix.end2end; + +import static org.apache.phoenix.util.TestUtil.JOIN_COITEM_TABLE_DISPLAY_NAME; +import static org.apache.phoenix.util.TestUtil.JOIN_COITEM_TABLE_FULL_NAME; +import static org.apache.phoenix.util.TestUtil.JOIN_CUSTOMER_TABLE_FULL_NAME; +import static org.apache.phoenix.util.TestUtil.JOIN_ITEM_TABLE_DISPLAY_NAME; +import static org.apache.phoenix.util.TestUtil.JOIN_ITEM_TABLE_FULL_NAME; +import static org.apache.phoenix.util.TestUtil.JOIN_ORDER_TABLE_DISPLAY_NAME; +import static org.apache.phoenix.util.TestUtil.JOIN_ORDER_TABLE_FULL_NAME; +import static org.apache.phoenix.util.TestUtil.JOIN_SCHEMA; +import static org.apache.phoenix.util.TestUtil.JOIN_SUPPLIER_TABLE_DISPLAY_NAME; +import static org.apache.phoenix.util.TestUtil.JOIN_SUPPLIER_TABLE_FULL_NAME; +import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +import java.sql.Connection; +import java.sql.Date; +import java.sql.DriverManager; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.Timestamp; +import java.text.SimpleDateFormat; +import java.util.Collection; +import java.util.List; +import java.util.Map; +import java.util.Properties; +import java.util.regex.Pattern; + +import org.apache.phoenix.query.QueryServices; +import org.apache.phoenix.schema.TableAlreadyExistsException; +import org.apache.phoenix.util.MetaDataUtil; +import org.apache.phoenix.util.PropertiesUtil; +import org.apache.phoenix.util.QueryUtil; +import org.apache.phoenix.util.ReadOnlyProps; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameters; + +import com.google.common.collect.Lists; +import com.google.common.collect.Maps; +@Category(HBaseManagedTimeTest.class) +@RunWith(Parameterized.class) +public class SubqueryIT extends BaseHBaseManagedTimeIT { + +private SimpleDateFormat format = new SimpleDateFormat(-MM-dd HH:mm:ss); +private String[] indexDDL; +private String[] plans; + +public SubqueryIT(String[] indexDDL, String[] plans) { 
+this.indexDDL = indexDDL; +this.plans = plans; +} + +@BeforeClass +@Shadower(classBeingShadowed = BaseHBaseManagedTimeIT.class) +public static void doSetup() throws Exception { +Map<String,String> props = Maps.newHashMapWithExpectedSize(3); +// Forces server cache to be used +props.put(QueryServices.INDEX_MUTATE_BATCH_SIZE_THRESHOLD_ATTRIB, Integer.toString(2)); +// Must update config before starting server +setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator())); +} + +@Before +public void initTable() throws Exception { +initTableValues(); +if (indexDDL != null && indexDDL.length > 0) { +Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); +Connection conn = DriverManager.getConnection(getUrl(), props); +for (String ddl : indexDDL) { +try { +conn.createStatement().execute(ddl); +} catch (TableAlreadyExistsException e) { +} +} +conn.close(); +} +} + +@Parameters +public static Collection<Object> data() { +List<Object> testCases = Lists.newArrayList(); +testCases.add(new String[][] { +{}, { +CLIENT
[2/3] PHOENIX-167 Support semi/anti-joins
http://git-wip-us.apache.org/repos/asf/phoenix/blob/30d496bc/phoenix-core/src/it/java/org/apache/phoenix/end2end/SubqueryIT.java -- diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SubqueryIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SubqueryIT.java new file mode 100644 index 000..3aecd29 --- /dev/null +++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SubqueryIT.java @@ -0,0 +1,810 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * License); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.phoenix.end2end; + +import static org.apache.phoenix.util.TestUtil.JOIN_COITEM_TABLE_DISPLAY_NAME; +import static org.apache.phoenix.util.TestUtil.JOIN_COITEM_TABLE_FULL_NAME; +import static org.apache.phoenix.util.TestUtil.JOIN_CUSTOMER_TABLE_FULL_NAME; +import static org.apache.phoenix.util.TestUtil.JOIN_ITEM_TABLE_DISPLAY_NAME; +import static org.apache.phoenix.util.TestUtil.JOIN_ITEM_TABLE_FULL_NAME; +import static org.apache.phoenix.util.TestUtil.JOIN_ORDER_TABLE_DISPLAY_NAME; +import static org.apache.phoenix.util.TestUtil.JOIN_ORDER_TABLE_FULL_NAME; +import static org.apache.phoenix.util.TestUtil.JOIN_SCHEMA; +import static org.apache.phoenix.util.TestUtil.JOIN_SUPPLIER_TABLE_DISPLAY_NAME; +import static org.apache.phoenix.util.TestUtil.JOIN_SUPPLIER_TABLE_FULL_NAME; +import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +import java.sql.Connection; +import java.sql.Date; +import java.sql.DriverManager; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.Timestamp; +import java.text.SimpleDateFormat; +import java.util.Collection; +import java.util.List; +import java.util.Map; +import java.util.Properties; +import java.util.regex.Pattern; + +import org.apache.phoenix.query.QueryServices; +import org.apache.phoenix.schema.TableAlreadyExistsException; +import org.apache.phoenix.util.MetaDataUtil; +import org.apache.phoenix.util.PropertiesUtil; +import org.apache.phoenix.util.QueryUtil; +import org.apache.phoenix.util.ReadOnlyProps; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameters; + +import com.google.common.collect.Lists; +import com.google.common.collect.Maps; +@Category(HBaseManagedTimeTest.class) +@RunWith(Parameterized.class) +public class SubqueryIT extends BaseHBaseManagedTimeIT { + +private SimpleDateFormat format = new SimpleDateFormat(-MM-dd HH:mm:ss); +private String[] indexDDL; +private String[] plans; + +public SubqueryIT(String[] indexDDL, String[] plans) { 
+this.indexDDL = indexDDL; +this.plans = plans; +} + +@BeforeClass +@Shadower(classBeingShadowed = BaseHBaseManagedTimeIT.class) +public static void doSetup() throws Exception { +Map<String,String> props = Maps.newHashMapWithExpectedSize(3); +// Forces server cache to be used +props.put(QueryServices.INDEX_MUTATE_BATCH_SIZE_THRESHOLD_ATTRIB, Integer.toString(2)); +// Must update config before starting server +setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator())); +} + +@Before +public void initTable() throws Exception { +initTableValues(); +if (indexDDL != null && indexDDL.length > 0) { +Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); +Connection conn = DriverManager.getConnection(getUrl(), props); +for (String ddl : indexDDL) { +try { +conn.createStatement().execute(ddl); +} catch (TableAlreadyExistsException e) { +} +} +conn.close(); +} +} + +@Parameters +public static Collection<Object> data() { +List<Object> testCases = Lists.newArrayList(); +testCases.add(new String[][] { +{}, { +CLIENT
[1/3] PHOENIX-167 Support semi/anti-joins
Repository: phoenix Updated Branches: refs/heads/4.0 926155246 - 30d496bcc http://git-wip-us.apache.org/repos/asf/phoenix/blob/30d496bc/phoenix-core/src/main/java/org/apache/phoenix/compile/SubqueryRewriter.java -- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/SubqueryRewriter.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/SubqueryRewriter.java new file mode 100644 index 000..42d060f --- /dev/null +++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/SubqueryRewriter.java @@ -0,0 +1,401 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * License); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.phoenix.compile; + +import java.sql.SQLException; +import java.sql.SQLFeatureNotSupportedException; +import java.util.Collections; +import java.util.List; + +import org.apache.hadoop.hbase.filter.CompareFilter; +import org.apache.phoenix.exception.SQLExceptionCode; +import org.apache.phoenix.exception.SQLExceptionInfo; +import org.apache.phoenix.jdbc.PhoenixConnection; +import org.apache.phoenix.parse.AliasedNode; +import org.apache.phoenix.parse.AndParseNode; +import org.apache.phoenix.parse.BooleanParseNodeVisitor; +import org.apache.phoenix.parse.ColumnParseNode; +import org.apache.phoenix.parse.ComparisonParseNode; +import org.apache.phoenix.parse.CompoundParseNode; +import org.apache.phoenix.parse.ExistsParseNode; +import org.apache.phoenix.parse.InParseNode; +import org.apache.phoenix.parse.JoinTableNode.JoinType; +import org.apache.phoenix.parse.LiteralParseNode; +import org.apache.phoenix.parse.ParseNode; +import org.apache.phoenix.parse.ParseNodeFactory; +import org.apache.phoenix.parse.ParseNodeRewriter; +import org.apache.phoenix.parse.RowValueConstructorParseNode; +import org.apache.phoenix.parse.SelectStatement; +import org.apache.phoenix.parse.StatelessTraverseAllParseNodeVisitor; +import org.apache.phoenix.parse.SubqueryParseNode; +import org.apache.phoenix.parse.TableName; +import org.apache.phoenix.parse.TableNode; +import org.apache.phoenix.schema.ColumnFamilyNotFoundException; +import org.apache.phoenix.schema.ColumnNotFoundException; + +import com.google.common.collect.Lists; + +/* + * Class for rewriting where-clause sub-queries into join queries. + * + * If the where-clause sub-query is one of those top-node conditions (being + * the only condition node or direct descendant of AND nodes), we convert the + * sub-query directly into semi-joins, anti-joins or inner-joins, and meanwhile + * remove the original condition node from the where clause. + * Otherwise, we convert the sub-query into left-joins and change the original + * condition node into a null test of a join table field (ONE if matched, NULL + * if not matched). 
+ */ +public class SubqueryRewriter extends ParseNodeRewriter { +private static final ParseNodeFactory NODE_FACTORY = new ParseNodeFactory(); + +private final ColumnResolver resolver; +private final PhoenixConnection connection; +private TableNode tableNode; +private ParseNode topNode; + +public static SelectStatement transform(SelectStatement select, ColumnResolver resolver, PhoenixConnection connection) throws SQLException { +ParseNode where = select.getWhere(); +if (where == null) +return select; + +SubqueryRewriter rewriter = new SubqueryRewriter(select, resolver, connection); +ParseNode normWhere = rewrite(where, rewriter); +if (normWhere == where) +return select; + +return NODE_FACTORY.select(Collections.singletonList(rewriter.tableNode), select.getHint(), select.isDistinct(), select.getSelect(), normWhere, select.getGroupBy(), select.getHaving(), select.getOrderBy(), select.getLimit(), select.getBindCount(), select.isAggregate(), select.hasSequence()); +} + +protected SubqueryRewriter(SelectStatement select, ColumnResolver resolver, PhoenixConnection connection) { +this.resolver = resolver; +this.connection = connection; +this.tableNode = select.getFrom().get(0); +this.topNode = null; +} + +@Override +protected void enterParseNode(ParseNode node) { +
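The class comment above describes two rewrite shapes. A small sketch that spells them out, using made-up ORDERS/ITEMS tables and columns; the rewritten SQL paraphrases the semantics described in the comment rather than quoting compiler output:

public class SubqueryRewriteSketch {
    // Case 1: the subquery is a top-node condition (the only WHERE condition or a
    // direct child of ANDs). It becomes a semi-join (IN / EXISTS), an anti-join
    // (NOT IN / NOT EXISTS) or an inner join, and the condition is removed from
    // the WHERE clause.
    static final String ORIGINAL_TOP_NODE =
            "SELECT o.order_id FROM ORDERS o "
          + "WHERE o.item_id IN (SELECT item_id FROM ITEMS WHERE name = 'T1')";
    static final String REWRITTEN_TOP_NODE =
            "SELECT o.order_id FROM ORDERS o "
          + "SEMI JOIN (SELECT item_id FROM ITEMS WHERE name = 'T1') i "
          + "ON o.item_id = i.item_id";   // conceptual only; there is no SEMI JOIN keyword in the SQL grammar

    // Case 2: the subquery is not a top-node condition (e.g. it sits under an OR).
    // It becomes a LEFT JOIN and the original condition is replaced by a null test
    // on a field of the joined table (non-null if matched, null if not matched).
    static final String ORIGINAL_NESTED =
            "SELECT o.order_id FROM ORDERS o "
          + "WHERE o.price > 100 OR EXISTS (SELECT 1 FROM ITEMS i WHERE i.item_id = o.item_id)";
    static final String REWRITTEN_NESTED =
            "SELECT o.order_id FROM ORDERS o "
          + "LEFT JOIN (SELECT DISTINCT 1 m, item_id FROM ITEMS) i ON i.item_id = o.item_id "
          + "WHERE o.price > 100 OR i.m IS NOT NULL";

    public static void main(String[] args) {
        System.out.println(ORIGINAL_TOP_NODE + "\n  becomes\n" + REWRITTEN_TOP_NODE + "\n");
        System.out.println(ORIGINAL_NESTED + "\n  becomes\n" + REWRITTEN_NESTED);
    }
}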
[3/3] git commit: PHOENIX-167 Support semi/anti-joins
PHOENIX-167 Support semi/anti-joins Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/30d496bc Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/30d496bc Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/30d496bc Branch: refs/heads/4.0 Commit: 30d496bccd8bc55401cc7ee954ac7d795f8de9d7 Parents: 9261552 Author: maryannxue maryann...@apache.org Authored: Mon Oct 6 13:37:17 2014 -0400 Committer: maryannxue maryann...@apache.org Committed: Mon Oct 6 13:37:17 2014 -0400 -- .../org/apache/phoenix/end2end/HashJoinIT.java | 433 +++--- .../org/apache/phoenix/end2end/SubqueryIT.java | 810 +++ phoenix-core/src/main/antlr3/PhoenixSQL.g | 2 +- .../phoenix/compile/ExpressionCompiler.java | 25 +- .../apache/phoenix/compile/JoinCompiler.java| 135 ++-- .../apache/phoenix/compile/QueryCompiler.java | 37 +- .../phoenix/compile/StatementNormalizer.java| 3 +- .../phoenix/compile/SubqueryRewriter.java | 401 + .../apache/phoenix/compile/UpsertCompiler.java | 5 + .../apache/phoenix/compile/WhereOptimizer.java | 15 +- .../coprocessor/HashJoinRegionScanner.java | 18 +- .../phoenix/exception/SQLExceptionCode.java | 3 +- .../apache/phoenix/execute/HashJoinPlan.java| 44 +- .../apache/phoenix/jdbc/PhoenixStatement.java | 6 + .../phoenix/parse/BooleanParseNodeVisitor.java | 10 + .../apache/phoenix/parse/ExistsParseNode.java | 6 +- .../org/apache/phoenix/parse/JoinTableNode.java | 10 +- .../apache/phoenix/parse/ParseNodeFactory.java | 17 +- .../apache/phoenix/parse/ParseNodeRewriter.java | 18 +- .../apache/phoenix/parse/ParseNodeVisitor.java | 3 + .../StatelessTraverseAllParseNodeVisitor.java | 9 + .../parse/TraverseAllParseNodeVisitor.java | 40 +- .../parse/TraverseNoParseNodeVisitor.java | 10 + .../parse/UnsupportedAllParseNodeVisitor.java | 25 + .../phoenix/compile/JoinQueryCompilerTest.java | 13 +- .../java/org/apache/phoenix/query/BaseTest.java | 8 + .../java/org/apache/phoenix/util/TestUtil.java | 3 + 27 files changed, 1644 insertions(+), 465 deletions(-) -- http://git-wip-us.apache.org/repos/asf/phoenix/blob/30d496bc/phoenix-core/src/it/java/org/apache/phoenix/end2end/HashJoinIT.java -- diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/HashJoinIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/HashJoinIT.java index ceba009..8e35216 100644 --- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/HashJoinIT.java +++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/HashJoinIT.java @@ -119,8 +119,7 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT { SERVER AGGREGATE INTO DISTINCT ROWS BY [I.NAME]\n + CLIENT MERGE SORT\n + CLIENT SORTED BY [I.NAME]\n + -PARALLEL EQUI/SEMI/ANTI-JOIN 1 TABLES:\n + -BUILD HASH TABLE 0\n + +PARALLEL LEFT-JOIN TABLE 0\n + CLIENT PARALLEL 1-WAY FULL SCAN OVER + JOIN_ITEM_TABLE_DISPLAY_NAME, /* * testLeftJoinWithAggregation() @@ -132,8 +131,7 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT { SERVER AGGREGATE INTO DISTINCT ROWS BY [I.item_id]\n + CLIENT MERGE SORT\n + CLIENT SORTED BY [SUM(O.QUANTITY) DESC]\n + -PARALLEL EQUI/SEMI/ANTI-JOIN 1 TABLES:\n + -BUILD HASH TABLE 0\n + +PARALLEL LEFT-JOIN TABLE 0\n + CLIENT PARALLEL 1-WAY FULL SCAN OVER + JOIN_ITEM_TABLE_DISPLAY_NAME + \n + SERVER FILTER BY FIRST KEY ONLY, /* @@ -147,8 +145,7 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT { SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY [I.item_id]\n + CLIENT MERGE SORT\n + CLIENT SORTED BY [SUM(O.QUANTITY) DESC NULLS LAST, I.item_id]\n + -PARALLEL 
EQUI/SEMI/ANTI-JOIN 1 TABLES:\n + -BUILD HASH TABLE 0\n + +PARALLEL LEFT-JOIN TABLE 0\n + CLIENT PARALLEL 1-WAY FULL SCAN OVER + JOIN_ORDER_TABLE_DISPLAY_NAME, /* * testRightJoinWithAggregation() @@ -160,8 +157,7 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT { SERVER AGGREGATE INTO DISTINCT ROWS BY [I.NAME]\n + CLIENT MERGE SORT\n + CLIENT SORTED BY [I.NAME]\n + -PARALLEL EQUI/SEMI/ANTI-JOIN 1 TABLES:\n + -BUILD HASH TABLE
[1/3] PHOENIX-167 Support semi/anti-joins
Repository: phoenix Updated Branches: refs/heads/3.0 006bce10f - 5effbbca0 http://git-wip-us.apache.org/repos/asf/phoenix/blob/5effbbca/phoenix-core/src/test/java/org/apache/phoenix/util/TestUtil.java -- diff --git a/phoenix-core/src/test/java/org/apache/phoenix/util/TestUtil.java b/phoenix-core/src/test/java/org/apache/phoenix/util/TestUtil.java index 67fa531..6f78a2b 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/util/TestUtil.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/util/TestUtil.java @@ -178,14 +178,17 @@ public class TestUtil { public static final String JOIN_CUSTOMER_TABLE = CustomerTable; public static final String JOIN_ITEM_TABLE = ItemTable; public static final String JOIN_SUPPLIER_TABLE = SupplierTable; +public static final String JOIN_COITEM_TABLE = CoitemTable; public static final String JOIN_ORDER_TABLE_FULL_NAME = '' + JOIN_SCHEMA + \.\ + JOIN_ORDER_TABLE + ''; public static final String JOIN_CUSTOMER_TABLE_FULL_NAME = '' + JOIN_SCHEMA + \.\ + JOIN_CUSTOMER_TABLE + ''; public static final String JOIN_ITEM_TABLE_FULL_NAME = '' + JOIN_SCHEMA + \.\ + JOIN_ITEM_TABLE + ''; public static final String JOIN_SUPPLIER_TABLE_FULL_NAME = '' + JOIN_SCHEMA + \.\ + JOIN_SUPPLIER_TABLE + ''; +public static final String JOIN_COITEM_TABLE_FULL_NAME = '' + JOIN_SCHEMA + \.\ + JOIN_COITEM_TABLE + ''; public static final String JOIN_ORDER_TABLE_DISPLAY_NAME = JOIN_SCHEMA + . + JOIN_ORDER_TABLE; public static final String JOIN_CUSTOMER_TABLE_DISPLAY_NAME = JOIN_SCHEMA + . + JOIN_CUSTOMER_TABLE; public static final String JOIN_ITEM_TABLE_DISPLAY_NAME = JOIN_SCHEMA + . + JOIN_ITEM_TABLE; public static final String JOIN_SUPPLIER_TABLE_DISPLAY_NAME = JOIN_SCHEMA + . + JOIN_SUPPLIER_TABLE; +public static final String JOIN_COITEM_TABLE_DISPLAY_NAME = JOIN_SCHEMA + . + JOIN_COITEM_TABLE; /** * Read-only properties used by all tests
[3/3] git commit: PHOENIX-167 Support semi/anti-joins
PHOENIX-167 Support semi/anti-joins Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/5effbbca Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/5effbbca Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/5effbbca Branch: refs/heads/3.0 Commit: 5effbbca0fd904168edc9d5a35aaf847cfb68dc6 Parents: 006bce1 Author: maryannxue maryann...@apache.org Authored: Mon Oct 6 13:55:56 2014 -0400 Committer: maryannxue maryann...@apache.org Committed: Mon Oct 6 13:55:56 2014 -0400 -- .../org/apache/phoenix/end2end/HashJoinIT.java | 338 ++--- .../org/apache/phoenix/end2end/SubqueryIT.java | 750 +++ phoenix-core/src/main/antlr3/PhoenixSQL.g | 2 +- .../phoenix/compile/ExpressionCompiler.java | 25 +- .../apache/phoenix/compile/JoinCompiler.java| 135 ++-- .../apache/phoenix/compile/QueryCompiler.java | 37 +- .../phoenix/compile/StatementNormalizer.java| 3 +- .../phoenix/compile/SubqueryRewriter.java | 401 ++ .../apache/phoenix/compile/UpsertCompiler.java | 5 + .../apache/phoenix/compile/WhereOptimizer.java | 15 +- .../coprocessor/HashJoinRegionScanner.java | 18 +- .../phoenix/exception/SQLExceptionCode.java | 3 +- .../apache/phoenix/execute/HashJoinPlan.java| 44 +- .../apache/phoenix/jdbc/PhoenixStatement.java | 6 + .../phoenix/parse/BooleanParseNodeVisitor.java | 10 + .../apache/phoenix/parse/ExistsParseNode.java | 6 +- .../org/apache/phoenix/parse/JoinTableNode.java | 10 +- .../apache/phoenix/parse/ParseNodeFactory.java | 17 +- .../apache/phoenix/parse/ParseNodeRewriter.java | 18 +- .../apache/phoenix/parse/ParseNodeVisitor.java | 3 + .../StatelessTraverseAllParseNodeVisitor.java | 9 + .../parse/TraverseAllParseNodeVisitor.java | 39 + .../parse/TraverseNoParseNodeVisitor.java | 10 + .../parse/UnsupportedAllParseNodeVisitor.java | 25 + .../phoenix/compile/JoinQueryCompilerTest.java | 13 +- .../java/org/apache/phoenix/query/BaseTest.java | 8 + .../java/org/apache/phoenix/util/TestUtil.java | 3 + 27 files changed, 1551 insertions(+), 402 deletions(-) -- http://git-wip-us.apache.org/repos/asf/phoenix/blob/5effbbca/phoenix-core/src/it/java/org/apache/phoenix/end2end/HashJoinIT.java -- diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/HashJoinIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/HashJoinIT.java index 5243a2e..99d601f 100644 --- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/HashJoinIT.java +++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/HashJoinIT.java @@ -118,8 +118,7 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT { SERVER AGGREGATE INTO DISTINCT ROWS BY [I.NAME]\n + CLIENT MERGE SORT\n + CLIENT SORTED BY [I.NAME]\n + -PARALLEL EQUI/SEMI/ANTI-JOIN 1 TABLES:\n + -BUILD HASH TABLE 0\n + +PARALLEL LEFT-JOIN TABLE 0\n + CLIENT PARALLEL 1-WAY FULL SCAN OVER + JOIN_ITEM_TABLE_DISPLAY_NAME, /* * testLeftJoinWithAggregation() @@ -131,8 +130,7 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT { SERVER AGGREGATE INTO DISTINCT ROWS BY [I.item_id]\n + CLIENT MERGE SORT\n + CLIENT SORTED BY [SUM(O.QUANTITY) DESC]\n + -PARALLEL EQUI/SEMI/ANTI-JOIN 1 TABLES:\n + -BUILD HASH TABLE 0\n + +PARALLEL LEFT-JOIN TABLE 0\n + CLIENT PARALLEL 1-WAY FULL SCAN OVER + JOIN_ITEM_TABLE_DISPLAY_NAME + \n + SERVER FILTER BY FIRST KEY ONLY, /* @@ -146,8 +144,7 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT { SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY [I.item_id]\n + CLIENT MERGE SORT\n + CLIENT SORTED BY [SUM(O.QUANTITY) DESC NULLS LAST, I.item_id]\n + -PARALLEL 
EQUI/SEMI/ANTI-JOIN 1 TABLES:\n + -BUILD HASH TABLE 0\n + +PARALLEL LEFT-JOIN TABLE 0\n + CLIENT PARALLEL 1-WAY FULL SCAN OVER + JOIN_ORDER_TABLE_DISPLAY_NAME, /* * testRightJoinWithAggregation() @@ -159,8 +156,7 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT { SERVER AGGREGATE INTO DISTINCT ROWS BY [I.NAME]\n + CLIENT MERGE SORT\n + CLIENT SORTED BY [I.NAME]\n + -PARALLEL EQUI/SEMI/ANTI-JOIN 1 TABLES:\n + -BUILD HASH TABLE 0\n
[2/3] PHOENIX-167 Support semi/anti-joins
http://git-wip-us.apache.org/repos/asf/phoenix/blob/5effbbca/phoenix-core/src/main/java/org/apache/phoenix/compile/JoinCompiler.java -- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/JoinCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/JoinCompiler.java index 075e420..4e882d1 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/compile/JoinCompiler.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/JoinCompiler.java @@ -51,6 +51,7 @@ import org.apache.phoenix.parse.ComparisonParseNode; import org.apache.phoenix.parse.DerivedTableNode; import org.apache.phoenix.parse.EqualParseNode; import org.apache.phoenix.parse.HintNode; +import org.apache.phoenix.parse.StatelessTraverseAllParseNodeVisitor; import org.apache.phoenix.parse.HintNode.Hint; import org.apache.phoenix.parse.JoinTableNode; import org.apache.phoenix.parse.JoinTableNode.JoinType; @@ -59,7 +60,6 @@ import org.apache.phoenix.parse.OrderByNode; import org.apache.phoenix.parse.ParseNode; import org.apache.phoenix.parse.ParseNodeFactory; import org.apache.phoenix.parse.SelectStatement; -import org.apache.phoenix.parse.StatelessTraverseAllParseNodeVisitor; import org.apache.phoenix.parse.TableName; import org.apache.phoenix.parse.TableNode; import org.apache.phoenix.parse.TableNodeVisitor; @@ -82,6 +82,7 @@ import org.apache.phoenix.util.SchemaUtil; import com.google.common.collect.ArrayListMultimap; import com.google.common.collect.ListMultimap; import com.google.common.collect.Lists; +import com.google.common.collect.Sets; public class JoinCompiler { @@ -122,9 +123,9 @@ public class JoinCompiler { joinTable.addFilter(select.getWhere()); } -ColumnParseNodeVisitor generalRefVisitor = new ColumnParseNodeVisitor(resolver); -ColumnParseNodeVisitor joinLocalRefVisitor = new ColumnParseNodeVisitor(resolver); -ColumnParseNodeVisitor prefilterRefVisitor = new ColumnParseNodeVisitor(resolver); +ColumnRefParseNodeVisitor generalRefVisitor = new ColumnRefParseNodeVisitor(resolver); +ColumnRefParseNodeVisitor joinLocalRefVisitor = new ColumnRefParseNodeVisitor(resolver); +ColumnRefParseNodeVisitor prefilterRefVisitor = new ColumnRefParseNodeVisitor(resolver); joinTable.pushDownColumnRefVisitors(generalRefVisitor, joinLocalRefVisitor, prefilterRefVisitor); @@ -256,7 +257,7 @@ public class JoinCompiler { this.prefilterAcceptedTables = new ArrayListJoinTable(); for (int i = lastRightJoinIndex == -1 ? 
0 : lastRightJoinIndex; i < joinSpecs.size(); i++) { JoinSpec joinSpec = joinSpecs.get(i); -if (joinSpec.getType() != JoinType.Left) { +if (joinSpec.getType() != JoinType.Left && joinSpec.getType() != JoinType.Anti) { prefilterAcceptedTables.add(joinSpec.getJoinTable()); } } @@ -306,9 +307,9 @@ public class JoinCompiler { filter.accept(visitor); } -public void pushDownColumnRefVisitors(ColumnParseNodeVisitor generalRefVisitor, -ColumnParseNodeVisitor joinLocalRefVisitor, -ColumnParseNodeVisitor prefilterRefVisitor) throws SQLException { +public void pushDownColumnRefVisitors(ColumnRefParseNodeVisitor generalRefVisitor, +ColumnRefParseNodeVisitor joinLocalRefVisitor, +ColumnRefParseNodeVisitor prefilterRefVisitor) throws SQLException { for (ParseNode node : table.getPreFilters()) { node.accept(prefilterRefVisitor); } @@ -359,14 +360,18 @@ public class JoinCompiler { if (!table.isFlat() || (!useStarJoin && count > 1 - && joinSpecs.get(count - 1).getType() != JoinType.Left)) + && joinSpecs.get(count - 1).getType() != JoinType.Left + && joinSpecs.get(count - 1).getType() != JoinType.Semi + && joinSpecs.get(count - 1).getType() != JoinType.Anti)) return null; boolean[] vector = new boolean[count]; for (int i = 0; i < count; i++) { JoinSpec joinSpec = joinSpecs.get(i); if (joinSpec.getType() != JoinType.Left - && joinSpec.getType() != JoinType.Inner) + && joinSpec.getType() != JoinType.Inner + && joinSpec.getType() != JoinType.Semi + && joinSpec.getType() != JoinType.Anti) return null; vector[i] = true; Iterator<TableRef> iter = joinSpec.getDependencies().iterator(); @@ -787,22 +792,22 @@ public class JoinCompiler { } private static class
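The repeated join-type comparisons added above amount to widening the set of join types that keep the star-join path available. A compact restatement of that check, as a sketch only (the helper name is invented; JoinCompiler itself builds a boolean vector inline):

import java.util.EnumSet;

import org.apache.phoenix.parse.JoinTableNode.JoinType;

final class StarJoinEligibility {
    // With PHOENIX-167, semi- and anti-joins are treated like left/inner joins when
    // deciding whether the star-join optimization may apply.
    private static final EnumSet<JoinType> STAR_JOIN_TYPES =
            EnumSet.of(JoinType.Left, JoinType.Inner, JoinType.Semi, JoinType.Anti);

    // Mirrors the per-JoinSpec test in the vector-building method above: any other
    // join type disqualifies the star-join plan.
    static boolean allowsStarJoin(JoinType type) {
        return STAR_JOIN_TYPES.contains(type);
    }

    private StarJoinEligibility() {}
}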
Jenkins build is back to normal : Phoenix | 4.0 #356
See https://builds.apache.org/job/Phoenix-4.0/356/changes
Build failed in Jenkins: Phoenix | 3.0 | Hadoop1 #242
See https://builds.apache.org/job/Phoenix-3.0-hadoop1/242/changes Changes: [maryannxue] PHOENIX-167 Support semi/anti-joins -- [...truncated 9920 lines...] ToNumberFunctionITBaseClientManagedTimeIT.dropTables:72 null FunkyNamesITBaseClientManagedTimeIT.dropTables:72 null TenantSpecificTablesDMLITBaseClientManagedTimeIT.dropTables:72 null ClientTimeArithmeticQueryITBaseClientManagedTimeIT.dropTables:72 null QueryITBaseClientManagedTimeIT.dropTables:72 null UpsertSelectITBaseClientManagedTimeIT.dropTables:72 null UpsertValuesITBaseClientManagedTimeIT.dropTables:72 null SaltedTableITBaseClientManagedTimeIT.dropTables:72 null ProductMetricsITBaseClientManagedTimeIT.dropTables:72 null ScanQueryITBaseClientManagedTimeIT.dropTables:72 null CompareDecimalToLongITBaseClientManagedTimeIT.dropTables:72 null ArrayITBaseClientManagedTimeIT.dropTables:72 null CreateTableITBaseClientManagedTimeIT.dropTables:72 null TruncateFunctionITBaseClientManagedTimeIT.dropTables:72 null MultiCfQueryExecITBaseClientManagedTimeIT.dropTables:72 null RowValueConstructorITBaseClientManagedTimeIT.dropTables:72 null StddevITBaseClientManagedTimeIT.dropTables:72 null NotQueryITBaseClientManagedTimeIT.dropTables:72 null IsNullITBaseClientManagedTimeIT.dropTables:72 null PercentileITBaseClientManagedTimeIT.dropTables:72 null DistinctCountITBaseClientManagedTimeIT.dropTables:72 null InMemoryOrderByITBaseClientManagedTimeIT.dropTables:72 null SkipRangeParallelIteratorRegionSplitterITBaseClientManagedTimeIT.dropTables:72 null Tests in error: OrderByITBaseClientManagedTimeIT.doSetup:63-BaseTest.setUpTestDriver:484-BaseTest.checkClusterInitialized:463-BaseTest.setUpTestCluster:477-BaseTest.initMiniCluster:527 » Runtime KeyOnlyIT.doSetup:56-BaseTest.setUpTestDriver:484-BaseTest.checkClusterInitialized:463-BaseTest.setUpTestCluster:477-BaseTest.initMiniCluster:527 » Runtime CaseStatementITBaseQueryIT.doSetup:84-BaseTest.setUpTestDriver:484-BaseTest.checkClusterInitialized:463-BaseTest.setUpTestCluster:477-BaseTest.initMiniCluster:527 » Runtime CustomEntityDataITBaseClientManagedTimeIT.doSetup:63-BaseTest.setUpTestDriver:484-BaseTest.checkClusterInitialized:463-BaseTest.setUpTestCluster:477-BaseTest.initMiniCluster:527 » Runtime ToCharFunctionITBaseClientManagedTimeIT.doSetup:63-BaseTest.setUpTestDriver:484-BaseTest.checkClusterInitialized:463-BaseTest.setUpTestCluster:477-BaseTest.initMiniCluster:527 » Runtime GroupByITBaseQueryIT.doSetup:84-BaseTest.setUpTestDriver:484-BaseTest.checkClusterInitialized:463-BaseTest.setUpTestCluster:477-BaseTest.initMiniCluster:527 » Runtime TenantSpecificTablesDDLITBaseTenantSpecificTablesIT.doSetup:88-BaseTest.setUpTestDriver:484-BaseTest.checkClusterInitialized:463-BaseTest.setUpTestCluster:477-BaseTest.initMiniCluster:527 » Runtime QueryDatabaseMetaDataITBaseClientManagedTimeIT.doSetup:63-BaseTest.setUpTestDriver:484-BaseTest.checkClusterInitialized:463-BaseTest.setUpTestCluster:477-BaseTest.initMiniCluster:527 » Runtime VariableLengthPKITBaseClientManagedTimeIT.doSetup:63-BaseTest.setUpTestDriver:484-BaseTest.checkClusterInitialized:463-BaseTest.setUpTestCluster:477-BaseTest.initMiniCluster:527 » Runtime ReadIsolationLevelITBaseClientManagedTimeIT.doSetup:63-BaseTest.setUpTestDriver:484-BaseTest.checkClusterInitialized:463-BaseTest.setUpTestCluster:477-BaseTest.initMiniCluster:527 » Runtime DerivedTableITBaseClientManagedTimeIT.doSetup:63-BaseTest.setUpTestDriver:484-BaseTest.checkClusterInitialized:463-BaseTest.setUpTestCluster:477-BaseTest.initMiniCluster:527 » Runtime 
GroupByCaseITBaseClientManagedTimeIT.doSetup:63-BaseTest.setUpTestDriver:484-BaseTest.checkClusterInitialized:463-BaseTest.setUpTestCluster:477-BaseTest.initMiniCluster:527 » Runtime DynamicColumnITBaseClientManagedTimeIT.doSetup:63-BaseTest.setUpTestDriver:484-BaseTest.checkClusterInitialized:463-BaseTest.setUpTestCluster:477-BaseTest.initMiniCluster:527 » Runtime TopNITBaseClientManagedTimeIT.doSetup:63-BaseTest.setUpTestDriver:484-BaseTest.checkClusterInitialized:463-BaseTest.setUpTestCluster:477-BaseTest.initMiniCluster:527 » Runtime SequenceIT.doSetup:70-BaseTest.setUpTestDriver:484-BaseTest.checkClusterInitialized:463-BaseTest.setUpTestCluster:477-BaseTest.initMiniCluster:527 » Runtime SpooledOrderByIT.doSetup:38-BaseTest.setUpTestDriver:484-BaseTest.checkClusterInitialized:463-BaseTest.setUpTestCluster:477-BaseTest.initMiniCluster:527 » Runtime NativeHBaseTypesITBaseClientManagedTimeIT.doSetup:63-BaseTest.setUpTestDriver:484-BaseTest.checkClusterInitialized:463-BaseTest.setUpTestCluster:477-BaseTest.initMiniCluster:527 » Runtime DynamicUpsertITBaseClientManagedTimeIT.doSetup:63-BaseTest.setUpTestDriver:484-BaseTest.checkClusterInitialized:463-BaseTest.setUpTestCluster:477-BaseTest.initMiniCluster:527 » Runtime
git commit: PHOENIX-1289 Drop index during upsert may abort RS (daniel meng + jyates)
Repository: phoenix Updated Branches: refs/heads/4.0 30d496bcc - 7701ae2ce PHOENIX-1289 Drop index during upsert may abort RS (daniel meng + jyates) Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/7701ae2c Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/7701ae2c Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/7701ae2c Branch: refs/heads/4.0 Commit: 7701ae2cebd0f49a059998357d25e531fab4d80a Parents: 30d496b Author: Jesse Yates jya...@apache.org Authored: Mon Oct 6 10:58:14 2014 -0700 Committer: Jesse Yates jya...@apache.org Committed: Mon Oct 6 11:04:34 2014 -0700 -- .../end2end/index/DropIndexDuringUpsertIT.java | 177 ++ .../index/write/KillServerOnFailurePolicy.java | 2 +- .../index/PhoenixIndexFailurePolicy.java| 239 +++ 3 files changed, 316 insertions(+), 102 deletions(-) -- http://git-wip-us.apache.org/repos/asf/phoenix/blob/7701ae2c/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/DropIndexDuringUpsertIT.java -- diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/DropIndexDuringUpsertIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/DropIndexDuringUpsertIT.java new file mode 100644 index 000..4e44ec8 --- /dev/null +++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/DropIndexDuringUpsertIT.java @@ -0,0 +1,177 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * License); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.phoenix.end2end.index; + +import static org.apache.phoenix.util.PhoenixRuntime.JDBC_PROTOCOL; +import static org.apache.phoenix.util.PhoenixRuntime.JDBC_PROTOCOL_SEPARATOR; +import static org.apache.phoenix.util.PhoenixRuntime.JDBC_PROTOCOL_TERMINATOR; +import static org.apache.phoenix.util.PhoenixRuntime.PHOENIX_TEST_DRIVER_URL_PARAM; +import static org.apache.phoenix.util.TestUtil.LOCALHOST; +import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.util.Map; +import java.util.Properties; +import java.util.concurrent.Callable; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.phoenix.end2end.NeedsOwnMiniClusterTest; +import org.apache.phoenix.jdbc.PhoenixTestDriver; +import org.apache.phoenix.query.BaseTest; +import org.apache.phoenix.query.QueryServices; +import org.apache.phoenix.schema.PIndexState; +import org.apache.phoenix.schema.PTableType; +import org.apache.phoenix.util.PropertiesUtil; +import org.apache.phoenix.util.ReadOnlyProps; +import org.apache.phoenix.util.SchemaUtil; +import org.apache.phoenix.util.StringUtil; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +import com.google.common.collect.Maps; + +@Category(NeedsOwnMiniClusterTest.class) +public class DropIndexDuringUpsertIT extends BaseTest { +private static final int NUM_SLAVES = 4; +private static String url; +private static PhoenixTestDriver driver; +private static HBaseTestingUtility util; + +private static ExecutorService service = Executors.newCachedThreadPool(); + +private static final String SCHEMA_NAME = S; +private static final String INDEX_TABLE_NAME = I; +private static final String DATA_TABLE_FULL_NAME = SchemaUtil.getTableName(SCHEMA_NAME, T); +private static final String INDEX_TABLE_FULL_NAME = SchemaUtil.getTableName(SCHEMA_NAME, I); + +@Before +public void doSetup() throws Exception { +
git commit: PHOENIX-1289 Drop index during upsert may abort RS (daniel meng + jyates)
Repository: phoenix Updated Branches: refs/heads/master 909d97596 - faeab9355 PHOENIX-1289 Drop index during upsert may abort RS (daniel meng + jyates) Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/faeab935 Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/faeab935 Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/faeab935 Branch: refs/heads/master Commit: faeab935554404a042285a01127e9b88b8e3a47c Parents: 909d975 Author: Jesse Yates jya...@apache.org Authored: Mon Oct 6 10:58:14 2014 -0700 Committer: Jesse Yates jya...@apache.org Committed: Mon Oct 6 11:04:58 2014 -0700 -- .../end2end/index/DropIndexDuringUpsertIT.java | 177 ++ .../index/write/KillServerOnFailurePolicy.java | 2 +- .../index/PhoenixIndexFailurePolicy.java| 239 +++ 3 files changed, 316 insertions(+), 102 deletions(-) -- http://git-wip-us.apache.org/repos/asf/phoenix/blob/faeab935/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/DropIndexDuringUpsertIT.java -- diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/DropIndexDuringUpsertIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/DropIndexDuringUpsertIT.java new file mode 100644 index 000..4e44ec8 --- /dev/null +++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/DropIndexDuringUpsertIT.java @@ -0,0 +1,177 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * License); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.phoenix.end2end.index; + +import static org.apache.phoenix.util.PhoenixRuntime.JDBC_PROTOCOL; +import static org.apache.phoenix.util.PhoenixRuntime.JDBC_PROTOCOL_SEPARATOR; +import static org.apache.phoenix.util.PhoenixRuntime.JDBC_PROTOCOL_TERMINATOR; +import static org.apache.phoenix.util.PhoenixRuntime.PHOENIX_TEST_DRIVER_URL_PARAM; +import static org.apache.phoenix.util.TestUtil.LOCALHOST; +import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.util.Map; +import java.util.Properties; +import java.util.concurrent.Callable; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.phoenix.end2end.NeedsOwnMiniClusterTest; +import org.apache.phoenix.jdbc.PhoenixTestDriver; +import org.apache.phoenix.query.BaseTest; +import org.apache.phoenix.query.QueryServices; +import org.apache.phoenix.schema.PIndexState; +import org.apache.phoenix.schema.PTableType; +import org.apache.phoenix.util.PropertiesUtil; +import org.apache.phoenix.util.ReadOnlyProps; +import org.apache.phoenix.util.SchemaUtil; +import org.apache.phoenix.util.StringUtil; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +import com.google.common.collect.Maps; + +@Category(NeedsOwnMiniClusterTest.class) +public class DropIndexDuringUpsertIT extends BaseTest { +private static final int NUM_SLAVES = 4; +private static String url; +private static PhoenixTestDriver driver; +private static HBaseTestingUtility util; + +private static ExecutorService service = Executors.newCachedThreadPool(); + +private static final String SCHEMA_NAME = S; +private static final String INDEX_TABLE_NAME = I; +private static final String DATA_TABLE_FULL_NAME = SchemaUtil.getTableName(SCHEMA_NAME, T); +private static final String INDEX_TABLE_FULL_NAME = SchemaUtil.getTableName(SCHEMA_NAME, I); + +@Before +public void doSetup() throws Exception { +
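The truncated doSetup above manages a private mini cluster instead of the shared test driver, which is why the test carries the NeedsOwnMiniClusterTest category. A bare-bones sketch of that pattern, assuming the stock HBaseTestingUtility API and omitting the index failure-policy configuration specific to this test:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseTestingUtility;

public class OwnMiniClusterSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        HBaseTestingUtility util = new HBaseTestingUtility(conf);
        util.startMiniCluster(4);   // NUM_SLAVES region servers, as in the test
        try {
            // A Phoenix JDBC URL for the mini cluster would be built from the
            // ZooKeeper client port, roughly "jdbc:phoenix:localhost:" + port.
            int port = util.getZkCluster().getClientPort();
            System.out.println("mini cluster up, ZK client port " + port);
        } finally {
            util.shutdownMiniCluster();
        }
    }
}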
Build failed in Jenkins: Phoenix | Master #403
See https://builds.apache.org/job/Phoenix-master/403/changes Changes: [jyates] PHOENIX-1289 Drop index during upsert may abort RS (daniel meng + jyates) -- [...truncated 507 lines...] Tests run: 48, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 27.255 sec - in org.apache.phoenix.end2end.ArrayIT Running org.apache.phoenix.end2end.StddevIT Tests run: 4, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 4.559 sec - in org.apache.phoenix.end2end.StddevIT Running org.apache.phoenix.end2end.MultiCfQueryExecIT Tests run: 7, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 8.866 sec - in org.apache.phoenix.end2end.MultiCfQueryExecIT Running org.apache.phoenix.end2end.NotQueryIT Tests run: 34, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 29.095 sec - in org.apache.phoenix.end2end.RowValueConstructorIT Running org.apache.phoenix.end2end.IsNullIT Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 4.784 sec - in org.apache.phoenix.end2end.IsNullIT Running org.apache.phoenix.end2end.PercentileIT Tests run: 17, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 10.456 sec - in org.apache.phoenix.end2end.PercentileIT Running org.apache.phoenix.end2end.DistinctCountIT Tests run: 10, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 9.661 sec - in org.apache.phoenix.end2end.DistinctCountIT Running org.apache.phoenix.end2end.InMemoryOrderByIT Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 3.089 sec - in org.apache.phoenix.end2end.InMemoryOrderByIT Running org.apache.phoenix.end2end.SkipRangeParallelIteratorRegionSplitterIT Tests run: 8, Failures: 0, Errors: 0, Skipped: 8, Time elapsed: 0.128 sec - in org.apache.phoenix.end2end.SkipRangeParallelIteratorRegionSplitterIT Tests run: 119, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 105.201 sec - in org.apache.phoenix.end2end.ScanQueryIT Tests run: 203, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 170.724 sec - in org.apache.phoenix.end2end.ClientTimeArithmeticQueryIT Tests run: 77, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 70.705 sec - in org.apache.phoenix.end2end.NotQueryIT Tests run: 182, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 208.946 sec - in org.apache.phoenix.end2end.QueryIT Results : Tests run: 1266, Failures: 0, Errors: 0, Skipped: 8 [INFO] [INFO] --- maven-failsafe-plugin:2.17:integration-test (HBaseManagedTimeTests) @ phoenix-core --- [INFO] Failsafe report directory: https://builds.apache.org/job/Phoenix-master/ws/phoenix-core/target/failsafe-reports [INFO] parallel='none', perCoreThreadCount=true, threadCount=0, useUnlimitedThreads=false, threadCountSuites=0, threadCountClasses=0, threadCountMethods=0, parallelOptimized=true --- T E S T S --- --- T E S T S --- Running org.apache.phoenix.trace.PhoenixTraceReaderIT Running org.apache.phoenix.end2end.SpillableGroupByIT Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 2.276 sec - in org.apache.phoenix.trace.PhoenixTraceReaderIT Running org.apache.phoenix.end2end.CSVCommonsLoaderIT Running org.apache.phoenix.trace.PhoenixTableMetricsWriterIT Running org.apache.phoenix.trace.PhoenixTracingEndToEndIT Running org.apache.phoenix.end2end.EncodeFunctionIT Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 1.564 sec - in org.apache.phoenix.trace.PhoenixTableMetricsWriterIT Running org.apache.phoenix.end2end.ParallelIteratorsIT Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 5.464 sec - in org.apache.phoenix.end2end.SpillableGroupByIT Running org.apache.phoenix.end2end.SkipScanAfterManualSplitIT Tests 
run: 6, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 8.268 sec - in org.apache.phoenix.end2end.EncodeFunctionIT Running org.apache.phoenix.end2end.TenantSpecificViewIndexSaltedIT Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 9.787 sec - in org.apache.phoenix.end2end.ParallelIteratorsIT Running org.apache.phoenix.end2end.EvaluationOfORIT Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 3.21 sec - in org.apache.phoenix.end2end.EvaluationOfORIT Running org.apache.phoenix.end2end.index.SaltedIndexIT Tests run: 14, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 18.835 sec - in org.apache.phoenix.end2end.CSVCommonsLoaderIT Running org.apache.phoenix.end2end.index.DropViewIT Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 3.729 sec - in org.apache.phoenix.end2end.index.DropViewIT Tests run: 4, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 14.352 sec - in org.apache.phoenix.end2end.TenantSpecificViewIndexSaltedIT Running org.apache.phoenix.end2end.index.ViewIndexIT Running org.apache.phoenix.end2end.index.LocalIndexIT Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 4.294
Build failed in Jenkins: Phoenix | 4.0 #357
See https://builds.apache.org/job/Phoenix-4.0/357/changes Changes: [jyates] PHOENIX-1289 Drop index during upsert may abort RS (daniel meng + jyates) -- [...truncated 505 lines...] Running org.apache.phoenix.end2end.ClientTimeArithmeticQueryIT Tests run: 119, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 84.018 sec - in org.apache.phoenix.end2end.ScanQueryIT Running org.apache.phoenix.end2end.ColumnProjectionOptimizationIT Tests run: 3, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 8.869 sec - in org.apache.phoenix.end2end.ColumnProjectionOptimizationIT Running org.apache.phoenix.end2end.NotQueryIT Tests run: 91, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 73.83 sec - in org.apache.phoenix.end2end.GroupByIT Running org.apache.phoenix.end2end.QueryDatabaseMetaDataIT Tests run: 63, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 46.038 sec - in org.apache.phoenix.end2end.CaseStatementIT Running org.apache.phoenix.end2end.UpsertValuesIT Tests run: 12, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 13.505 sec - in org.apache.phoenix.end2end.UpsertValuesIT Running org.apache.phoenix.end2end.RowValueConstructorIT Tests run: 15, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 35.531 sec - in org.apache.phoenix.end2end.QueryDatabaseMetaDataIT Running org.apache.phoenix.end2end.InMemoryOrderByIT Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 2.889 sec - in org.apache.phoenix.end2end.InMemoryOrderByIT Running org.apache.phoenix.end2end.TopNIT Tests run: 4, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 3.826 sec - in org.apache.phoenix.end2end.TopNIT Running org.apache.phoenix.end2end.ArrayIT Tests run: 182, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 178.744 sec - in org.apache.phoenix.end2end.QueryIT Tests run: 34, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 24.292 sec - in org.apache.phoenix.end2end.RowValueConstructorIT Tests run: 77, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 61.356 sec - in org.apache.phoenix.end2end.NotQueryIT Tests run: 48, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 23.152 sec - in org.apache.phoenix.end2end.ArrayIT Tests run: 203, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 142.813 sec - in org.apache.phoenix.end2end.ClientTimeArithmeticQueryIT Results : Tests run: 1266, Failures: 0, Errors: 0, Skipped: 8 [INFO] [INFO] --- maven-failsafe-plugin:2.17:integration-test (HBaseManagedTimeTests) @ phoenix-core --- [INFO] Failsafe report directory: https://builds.apache.org/job/Phoenix-4.0/ws/phoenix-core/target/failsafe-reports [INFO] parallel='none', perCoreThreadCount=true, threadCount=0, useUnlimitedThreads=false, threadCountSuites=0, threadCountClasses=0, threadCountMethods=0, parallelOptimized=true --- T E S T S --- --- T E S T S --- Running org.apache.phoenix.trace.PhoenixTraceReaderIT Running org.apache.phoenix.trace.PhoenixTracingEndToEndIT Running org.apache.phoenix.end2end.HashJoinIT Running org.apache.phoenix.trace.PhoenixTableMetricsWriterIT Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 1.733 sec - in org.apache.phoenix.trace.PhoenixTraceReaderIT Running org.apache.phoenix.end2end.EncodeFunctionIT Running org.apache.phoenix.end2end.SkipScanQueryIT Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 1.586 sec - in org.apache.phoenix.trace.PhoenixTableMetricsWriterIT Running org.apache.phoenix.end2end.SortOrderFIT Tests run: 6, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 8.543 sec - in org.apache.phoenix.end2end.EncodeFunctionIT Running 
org.apache.phoenix.end2end.StatementHintsIT Tests run: 30, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 9.794 sec - in org.apache.phoenix.end2end.SortOrderFIT Running org.apache.phoenix.end2end.ArithmeticQueryIT Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 3.127 sec - in org.apache.phoenix.end2end.StatementHintsIT Running org.apache.phoenix.end2end.ViewIT Tests run: 8, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 13.462 sec - in org.apache.phoenix.end2end.SkipScanQueryIT Running org.apache.phoenix.end2end.ConvertTimezoneFunctionIT Tests run: 6, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 3.961 sec - in org.apache.phoenix.end2end.ConvertTimezoneFunctionIT Running org.apache.phoenix.end2end.RegexpSubstrFunctionIT Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 2.475 sec - in org.apache.phoenix.end2end.RegexpSubstrFunctionIT Running org.apache.phoenix.end2end.TimezoneOffsetFunctionIT Tests run: 6, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 3.684 sec - in org.apache.phoenix.end2end.TimezoneOffsetFunctionIT Running org.apache.phoenix.end2end.StatsCollectorIT Tests run: 20, Failures: 0, Errors: 0, Skipped: 0, Time
git commit: PHOENIX-1305 create index throws NPE when dataTable has specified default column family (daniel meng)
Repository: phoenix Updated Branches: refs/heads/4.0 7701ae2ce - e9094d0a4 PHOENIX-1305 create index throws NPE when dataTable has specified default column family (daniel meng) Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/e9094d0a Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/e9094d0a Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/e9094d0a Branch: refs/heads/4.0 Commit: e9094d0a4cee18161197cffa57d2c9868c23b0a4 Parents: 7701ae2 Author: Jesse Yates jya...@apache.org Authored: Mon Oct 6 11:42:13 2014 -0700 Committer: Jesse Yates jya...@apache.org Committed: Mon Oct 6 11:42:13 2014 -0700 -- .../phoenix/end2end/index/MutableIndexIT.java | 50 .../phoenix/parse/CreateIndexStatement.java | 3 +- 2 files changed, 52 insertions(+), 1 deletion(-) -- http://git-wip-us.apache.org/repos/asf/phoenix/blob/e9094d0a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java -- diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java index b093acb..8c9256d 100644 --- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java +++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java @@ -34,10 +34,16 @@ import java.util.Map; import java.util.Properties; import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.phoenix.compile.ColumnResolver; +import org.apache.phoenix.compile.FromCompiler; import org.apache.phoenix.end2end.BaseHBaseManagedTimeIT; import org.apache.phoenix.end2end.HBaseManagedTimeTest; import org.apache.phoenix.end2end.Shadower; +import org.apache.phoenix.jdbc.PhoenixConnection; +import org.apache.phoenix.parse.NamedTableNode; +import org.apache.phoenix.parse.TableName; import org.apache.phoenix.query.QueryServices; +import org.apache.phoenix.schema.PTable; import org.apache.phoenix.util.MetaDataUtil; import org.apache.phoenix.util.PropertiesUtil; import org.apache.phoenix.util.QueryUtil; @@ -63,6 +69,50 @@ public class MutableIndexIT extends BaseMutableIndexIT { props.put(QueryServices.DROP_METADATA_ATTRIB, Boolean.toString(true)); setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator())); } + +@Test +public void testIndexCreateWithoutOptions() throws Exception { +createIndexOnTableWithSpecifiedDefaultCF(false); +} + +@Test +public void testIndexCreateWithOptions() throws Exception { +createIndexOnTableWithSpecifiedDefaultCF(true); +} + +private void createIndexOnTableWithSpecifiedDefaultCF(boolean hasOptions) throws Exception { +String query; +ResultSet rs; + +Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); +Connection conn = DriverManager.getConnection(getUrl(), props); +conn.createStatement().execute( +CREATE TABLE + DATA_TABLE_FULL_NAME + (k VARCHAR NOT NULL PRIMARY KEY, v1 VARCHAR, v2 VARCHAR) DEFAULT_COLUMN_FAMILY='A'); +query = SELECT * FROM + DATA_TABLE_FULL_NAME; +rs = conn.createStatement().executeQuery(query); +assertFalse(rs.next()); + +String options = hasOptions ? 
SALT_BUCKETS=10, MULTI_TENANT=true, IMMUTABLE_ROWS=true, DISABLE_WAL=true : ; +conn.createStatement().execute( +CREATE INDEX + INDEX_TABLE_NAME + ON + DATA_TABLE_FULL_NAME + (v1) INCLUDE (v2) + options); +query = SELECT * FROM + INDEX_TABLE_FULL_NAME; +rs = conn.createStatement().executeQuery(query); +assertFalse(rs.next()); + +//check options set correctly on index +TableName indexName = TableName.create(SCHEMA_NAME, INDEX_TABLE_NAME); +NamedTableNode indexNode = NamedTableNode.create(null, indexName, null); +ColumnResolver resolver = FromCompiler.getResolver(indexNode, conn.unwrap(PhoenixConnection.class)); +PTable indexTable = resolver.getTables().get(0).getTable(); +// Can't set IMMUTABLE_ROWS, MULTI_TENANT or DEFAULT_COLUMN_FAMILY_NAME on an index +assertNull(indexTable.getDefaultFamilyName()); +assertFalse(indexTable.isMultiTenant()); +assertFalse(indexTable.isImmutableRows()); +if(hasOptions) { +assertEquals(10, indexTable.getBucketNum().intValue()); +assertTrue(indexTable.isWALDisabled()); +} +} @Test public void testIndexWithNullableFixedWithCols() throws Exception { http://git-wip-us.apache.org/repos/asf/phoenix/blob/e9094d0a/phoenix-core/src/main/java/org/apache/phoenix/parse/CreateIndexStatement.java
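The DDL exercised by the new tests, restated as plain JDBC for readers skimming the flattened diff; the connection URL and the S.T/I names are placeholders, while the option list mirrors the hasOptions branch of the test:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class IndexOnDefaultCfTable {
    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
             Statement stmt = conn.createStatement()) {
            // The data table declares a default column family; before PHOENIX-1305
            // the CREATE INDEX below could hit an NPE because of it.
            stmt.execute("CREATE TABLE S.T (k VARCHAR NOT NULL PRIMARY KEY, v1 VARCHAR, v2 VARCHAR) "
                       + "DEFAULT_COLUMN_FAMILY='A'");
            // SALT_BUCKETS and DISABLE_WAL apply to the index table; IMMUTABLE_ROWS,
            // MULTI_TENANT and the default column family are not carried over,
            // which is what the assertions in the test above verify.
            stmt.execute("CREATE INDEX I ON S.T (v1) INCLUDE (v2) "
                       + "SALT_BUCKETS=10, MULTI_TENANT=true, IMMUTABLE_ROWS=true, DISABLE_WAL=true");
        }
    }
}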
git commit: PHOENIX-1107 Support mutable indexes over replication
Repository: phoenix Updated Branches: refs/heads/4.0 e9094d0a4 - 763f10f00 PHOENIX-1107 Support mutable indexes over replication Adding test to ensure that we still have indexes working over replication, rather than just relying on the fact that it 'just works'. Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/763f10f0 Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/763f10f0 Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/763f10f0 Branch: refs/heads/4.0 Commit: 763f10f00ff5f26c1a2df9b19f430253ee331d90 Parents: e9094d0 Author: Jesse Yates jya...@apache.org Authored: Mon Oct 6 11:50:47 2014 -0700 Committer: Jesse Yates jya...@apache.org Committed: Mon Oct 6 11:55:11 2014 -0700 -- .../index/MutableIndexReplicationIT.java| 280 +++ 1 file changed, 280 insertions(+) -- http://git-wip-us.apache.org/repos/asf/phoenix/blob/763f10f0/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexReplicationIT.java -- diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexReplicationIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexReplicationIT.java new file mode 100644 index 000..9981ed8 --- /dev/null +++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexReplicationIT.java @@ -0,0 +1,280 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * License); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.phoenix.end2end.index; + +import com.google.common.collect.Maps; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.*; +import org.apache.hadoop.hbase.client.*; +import org.apache.hadoop.hbase.client.replication.ReplicationAdmin; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.phoenix.end2end.NeedsOwnMiniClusterTest; +import org.apache.phoenix.jdbc.PhoenixEmbeddedDriver; +import org.apache.phoenix.jdbc.PhoenixTestDriver; +import org.apache.phoenix.query.BaseTest; +import org.apache.phoenix.query.QueryServices; +import org.apache.phoenix.util.*; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +import java.io.IOException; +import java.sql.*; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Properties; + +import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES; +import static org.junit.Assert.*; +import static org.junit.Assert.assertEquals; + +/** + * Test that we correctly replicate indexes over replication + * <p> + * Code for setUp/teardown copied from org.apache.hadoop.hbase.replication.TestReplicationBase in + * HBase 0.98.5 + * </p> + */ +@Category(NeedsOwnMiniClusterTest.class) +public class MutableIndexReplicationIT extends BaseTest { + +private static final Log LOG = LogFactory.getLog(MutableIndexReplicationIT.class); + +public static final String SCHEMA_NAME = ""; +public static final String DATA_TABLE_NAME = "T"; +public static final String INDEX_TABLE_NAME = "I"; +public static final String DATA_TABLE_FULL_NAME = SchemaUtil.getTableName(SCHEMA_NAME, "T"); +public static final String INDEX_TABLE_FULL_NAME = SchemaUtil.getTableName(SCHEMA_NAME, "I"); +private static final long REPLICATION_WAIT_TIME_MILLIS = 1; + +protected static PhoenixTestDriver driver; +private static String URL; + +protected static Configuration conf1 = HBaseConfiguration.create(); +protected static Configuration conf2; + +protected static ZooKeeperWatcher zkw1; +protected static ZooKeeperWatcher zkw2; + +protected static ReplicationAdmin admin; + +protected static HBaseTestingUtility utility1; +protected static HBaseTestingUtility utility2; +protected static
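The digest truncates the new MutableIndexReplicationIT here, before its two-cluster setUp logic. For orientation only, the sketch below shows the kind of HBase 0.98 client calls such a test relies on: register the second cluster as a replication peer with ReplicationAdmin and mark the relevant table's column families with global replication scope so their WAL edits are shipped to the peer (in a real setup this is done for each table whose edits must be replicated). The peer id, cluster key, and table name are assumptions for illustration, not values taken from the truncated test.

// Illustrative replication wiring (standard HBase 0.98 client API); the peer id,
// cluster key, and table name here are assumptions, not values from the test above.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.replication.ReplicationAdmin;
import org.apache.hadoop.hbase.util.Bytes;

public class ReplicationSetupSketch {
    public static void enableReplication(Configuration sourceConf) throws Exception {
        // Point the source cluster at the target cluster: "<zkQuorum>:<zkPort>:<znodeParent>".
        ReplicationAdmin replicationAdmin = new ReplicationAdmin(sourceConf);
        replicationAdmin.addPeer("1", "localhost:2182:/hbase-target");

        // Flag every column family of the table for global replication scope so its
        // WAL edits are replicated to the peer cluster.
        HBaseAdmin admin = new HBaseAdmin(sourceConf);
        HTableDescriptor desc = admin.getTableDescriptor(Bytes.toBytes("T"));
        for (HColumnDescriptor family : desc.getColumnFamilies()) {
            family.setScope(HConstants.REPLICATION_SCOPE_GLOBAL);
        }
        admin.disableTable("T");
        admin.modifyTable(Bytes.toBytes("T"), desc);
        admin.enableTable("T");
        admin.close();
    }
}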
git commit: PHOENIX-1107 Support mutable indexes over replication
Repository: phoenix Updated Branches: refs/heads/master ea0a502ce - 3b30690f6 PHOENIX-1107 Support mutable indexes over replication Adding test to ensure that we still have indexes working over replication, rather than just relying on the fact that it 'just works'. Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/3b30690f Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/3b30690f Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/3b30690f Branch: refs/heads/master Commit: 3b30690f6fdb11cc6c5d4bf53ee709b6e97196f7 Parents: ea0a502 Author: Jesse Yates jya...@apache.org Authored: Mon Oct 6 11:50:47 2014 -0700 Committer: Jesse Yates jya...@apache.org Committed: Mon Oct 6 11:53:23 2014 -0700 -- .../index/MutableIndexReplicationIT.java| 280 +++ 1 file changed, 280 insertions(+) -- http://git-wip-us.apache.org/repos/asf/phoenix/blob/3b30690f/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexReplicationIT.java -- diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexReplicationIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexReplicationIT.java new file mode 100644 index 000..9981ed8 --- /dev/null +++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexReplicationIT.java @@ -0,0 +1,280 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * License); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.phoenix.end2end.index; + +import com.google.common.collect.Maps; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.*; +import org.apache.hadoop.hbase.client.*; +import org.apache.hadoop.hbase.client.replication.ReplicationAdmin; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.phoenix.end2end.NeedsOwnMiniClusterTest; +import org.apache.phoenix.jdbc.PhoenixEmbeddedDriver; +import org.apache.phoenix.jdbc.PhoenixTestDriver; +import org.apache.phoenix.query.BaseTest; +import org.apache.phoenix.query.QueryServices; +import org.apache.phoenix.util.*; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +import java.io.IOException; +import java.sql.*; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Properties; + +import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES; +import static org.junit.Assert.*; +import static org.junit.Assert.assertEquals; + +/** + * Test that we correctly replicate indexes over replication + * <p> + * Code for setUp/teardown copied from org.apache.hadoop.hbase.replication.TestReplicationBase in + * HBase 0.98.5 + * </p> + */ +@Category(NeedsOwnMiniClusterTest.class) +public class MutableIndexReplicationIT extends BaseTest { + +private static final Log LOG = LogFactory.getLog(MutableIndexReplicationIT.class); + +public static final String SCHEMA_NAME = ""; +public static final String DATA_TABLE_NAME = "T"; +public static final String INDEX_TABLE_NAME = "I"; +public static final String DATA_TABLE_FULL_NAME = SchemaUtil.getTableName(SCHEMA_NAME, "T"); +public static final String INDEX_TABLE_FULL_NAME = SchemaUtil.getTableName(SCHEMA_NAME, "I"); +private static final long REPLICATION_WAIT_TIME_MILLIS = 1; + +protected static PhoenixTestDriver driver; +private static String URL; + +protected static Configuration conf1 = HBaseConfiguration.create(); +protected static Configuration conf2; + +protected static ZooKeeperWatcher zkw1; +protected static ZooKeeperWatcher zkw2; + +protected static ReplicationAdmin admin; + +protected static HBaseTestingUtility utility1; +protected static HBaseTestingUtility utility2; +protected
Jenkins build is back to normal : Phoenix | Master #404
See https://builds.apache.org/job/Phoenix-master/404/changes
Build failed in Jenkins: Phoenix | 4.0 #358
See https://builds.apache.org/job/Phoenix-4.0/358/changes Changes: [jyates] PHOENIX-1305 create index throws NPE when dataTable has specified default column family (daniel meng) -- [...truncated 505 lines...] Running org.apache.phoenix.end2end.NativeHBaseTypesIT Tests run: 7, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 2.291 sec - in org.apache.phoenix.end2end.NativeHBaseTypesIT Running org.apache.phoenix.end2end.CompareDecimalToLongIT Tests run: 8, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 5.304 sec - in org.apache.phoenix.end2end.salted.SaltedTableIT Running org.apache.phoenix.end2end.TruncateFunctionIT Tests run: 8, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 3.235 sec - in org.apache.phoenix.end2end.CompareDecimalToLongIT Running org.apache.phoenix.end2end.QueryDatabaseMetaDataIT Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 1.781 sec - in org.apache.phoenix.end2end.TruncateFunctionIT Running org.apache.phoenix.end2end.SkipRangeParallelIteratorRegionSplitterIT Tests run: 8, Failures: 0, Errors: 0, Skipped: 8, Time elapsed: 0.12 sec - in org.apache.phoenix.end2end.SkipRangeParallelIteratorRegionSplitterIT Running org.apache.phoenix.end2end.PercentileIT Tests run: 17, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 10.093 sec - in org.apache.phoenix.end2end.PercentileIT Running org.apache.phoenix.end2end.ToNumberFunctionIT Tests run: 18, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 4.2 sec - in org.apache.phoenix.end2end.ToNumberFunctionIT Running org.apache.phoenix.end2end.OrderByIT Tests run: 91, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 76.487 sec - in org.apache.phoenix.end2end.GroupByIT Running org.apache.phoenix.end2end.GroupByCaseIT Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 3.38 sec - in org.apache.phoenix.end2end.OrderByIT Tests run: 3, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 2.556 sec - in org.apache.phoenix.end2end.GroupByCaseIT Tests run: 203, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 148.164 sec - in org.apache.phoenix.end2end.ClientTimeArithmeticQueryIT Tests run: 15, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 34.115 sec - in org.apache.phoenix.end2end.QueryDatabaseMetaDataIT Tests run: 182, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 187.833 sec - in org.apache.phoenix.end2end.QueryIT Results : Tests run: 1266, Failures: 0, Errors: 0, Skipped: 8 [INFO] [INFO] --- maven-failsafe-plugin:2.17:integration-test (HBaseManagedTimeTests) @ phoenix-core --- [INFO] Failsafe report directory: https://builds.apache.org/job/Phoenix-4.0/ws/phoenix-core/target/failsafe-reports [INFO] parallel='none', perCoreThreadCount=true, threadCount=0, useUnlimitedThreads=false, threadCountSuites=0, threadCountClasses=0, threadCountMethods=0, parallelOptimized=true --- T E S T S --- --- T E S T S --- Running org.apache.phoenix.end2end.AutoCommitIT Running org.apache.phoenix.end2end.DeleteIT Running org.apache.phoenix.trace.PhoenixTracingEndToEndIT Running org.apache.phoenix.trace.PhoenixTableMetricsWriterIT Running org.apache.phoenix.trace.PhoenixTraceReaderIT Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 1.652 sec - in org.apache.phoenix.trace.PhoenixTableMetricsWriterIT Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 3.372 sec - in org.apache.phoenix.end2end.AutoCommitIT Running org.apache.phoenix.end2end.EncodeFunctionIT Running org.apache.phoenix.end2end.ModulusExpressionIT Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 1.532 sec - in 
org.apache.phoenix.trace.PhoenixTraceReaderIT Running org.apache.phoenix.end2end.QueryExecWithoutSCNIT Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 3.639 sec - in org.apache.phoenix.end2end.QueryExecWithoutSCNIT Running org.apache.phoenix.end2end.LastValueFunctionIT Tests run: 6, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 8.033 sec - in org.apache.phoenix.end2end.EncodeFunctionIT Running org.apache.phoenix.end2end.QueryMoreIT Tests run: 11, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 8.124 sec - in org.apache.phoenix.end2end.ModulusExpressionIT Running org.apache.phoenix.end2end.EvaluationOfORIT Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 1.834 sec - in org.apache.phoenix.end2end.EvaluationOfORIT Running org.apache.phoenix.end2end.PhoenixEncodeDecodeIT Tests run: 11, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 7.241 sec - in org.apache.phoenix.end2end.LastValueFunctionIT Running org.apache.phoenix.end2end.StatementHintsIT Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 3.267 sec - in org.apache.phoenix.end2end.StatementHintsIT Running
Build failed in Jenkins: Phoenix | Master #405
See https://builds.apache.org/job/Phoenix-master/405/changes Changes: [jyates] PHOENIX-1107 Support mutable indexes over replication -- [...truncated 1006 lines...] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:98) at org.apache.hadoop.hbase.ipc.RpcExecutor.consumerLoop(RpcExecutor.java:114) at org.apache.hadoop.hbase.ipc.RpcExecutor$1.run(RpcExecutor.java:94) at java.lang.Thread.run(Thread.java:724) Caused by: org.apache.phoenix.memory.InsufficientMemoryException: Requested memory of 21196 bytes could not be allocated from remaining memory of 21196 bytes from global pool of 4 bytes after waiting for 0ms. at org.apache.phoenix.memory.GlobalMemoryManager.allocateBytes(GlobalMemoryManager.java:81) at org.apache.phoenix.memory.GlobalMemoryManager.allocate(GlobalMemoryManager.java:100) at org.apache.phoenix.memory.GlobalMemoryManager.allocate(GlobalMemoryManager.java:106) at org.apache.phoenix.cache.aggcache.SpillableGroupByCache.init(SpillableGroupByCache.java:150) at org.apache.phoenix.coprocessor.GroupedAggregateRegionObserver$GroupByCacheFactory.newCache(GroupedAggregateRegionObserver.java:365) at org.apache.phoenix.coprocessor.GroupedAggregateRegionObserver.scanUnordered(GroupedAggregateRegionObserver.java:400) at org.apache.phoenix.coprocessor.GroupedAggregateRegionObserver.doPostScannerOpen(GroupedAggregateRegionObserver.java:161) at org.apache.phoenix.coprocessor.BaseScannerRegionObserver.postScannerOpen(BaseScannerRegionObserver.java:140) ... 8 more at org.apache.phoenix.util.ServerUtil.parseServerException(ServerUtil.java:107) at org.apache.phoenix.iterate.TableResultIterator.init(TableResultIterator.java:57) at org.apache.phoenix.iterate.ParallelIterators$2.call(ParallelIterators.java:583) at org.apache.phoenix.iterate.ParallelIterators$2.call(ParallelIterators.java:578) at java.util.concurrent.FutureTask$Sync.innerRun(FutureTask.java:334) at java.util.concurrent.FutureTask.run(FutureTask.java:166) at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145) at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615) at java.lang.Thread.run(Thread.java:724) Caused by: org.apache.hadoop.hbase.DoNotRetryIOException: org.apache.hadoop.hbase.DoNotRetryIOException: KVBIGINTVALUETEST,,1412623282791.e6d143e73abe3735cd8fc02503accd91.: Requested memory of 21196 bytes could not be allocated from remaining memory of 21196 bytes from global pool of 4 bytes after waiting for 0ms. 
at org.apache.phoenix.util.ServerUtil.createIOException(ServerUtil.java:83) at org.apache.phoenix.util.ServerUtil.throwIOException(ServerUtil.java:51) at org.apache.phoenix.coprocessor.BaseScannerRegionObserver.postScannerOpen(BaseScannerRegionObserver.java:158) at org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost.postScannerOpen(RegionCoprocessorHost.java:1845) at org.apache.hadoop.hbase.regionserver.HRegionServer.scan(HRegionServer.java:3092) at org.apache.hadoop.hbase.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:29497) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:2027) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:98) at org.apache.hadoop.hbase.ipc.RpcExecutor.consumerLoop(RpcExecutor.java:114) at org.apache.hadoop.hbase.ipc.RpcExecutor$1.run(RpcExecutor.java:94) at java.lang.Thread.run(Thread.java:724) Caused by: org.apache.phoenix.memory.InsufficientMemoryException: Requested memory of 21196 bytes could not be allocated from remaining memory of 21196 bytes from global pool of 4 bytes after waiting for 0ms. at org.apache.phoenix.memory.GlobalMemoryManager.allocateBytes(GlobalMemoryManager.java:81) at org.apache.phoenix.memory.GlobalMemoryManager.allocate(GlobalMemoryManager.java:100) at org.apache.phoenix.memory.GlobalMemoryManager.allocate(GlobalMemoryManager.java:106) at org.apache.phoenix.cache.aggcache.SpillableGroupByCache.init(SpillableGroupByCache.java:150) at org.apache.phoenix.coprocessor.GroupedAggregateRegionObserver$GroupByCacheFactory.newCache(GroupedAggregateRegionObserver.java:365) at org.apache.phoenix.coprocessor.GroupedAggregateRegionObserver.scanUnordered(GroupedAggregateRegionObserver.java:400) at org.apache.phoenix.coprocessor.GroupedAggregateRegionObserver.doPostScannerOpen(GroupedAggregateRegionObserver.java:161) at org.apache.phoenix.coprocessor.BaseScannerRegionObserver.postScannerOpen(BaseScannerRegionObserver.java:140) ... 8 more at sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) at
git commit: PHOENIX-1321 Cleanup setting of timestamps when collecting and using stats
Repository: phoenix Updated Branches: refs/heads/master 3b30690f6 - 7dbd3503b PHOENIX-1321 Cleanup setting of timestamps when collecting and using stats Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/7dbd3503 Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/7dbd3503 Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/7dbd3503 Branch: refs/heads/master Commit: 7dbd3503bf19f1dc9ae3adb573c40fc72803295d Parents: 3b30690 Author: James Taylor jtay...@salesforce.com Authored: Mon Oct 6 16:02:37 2014 -0700 Committer: James Taylor jtay...@salesforce.com Committed: Mon Oct 6 16:04:49 2014 -0700 -- .../end2end/BaseTenantSpecificTablesIT.java | 2 +- .../org/apache/phoenix/end2end/KeyOnlyIT.java | 33 -- .../phoenix/end2end/MultiCfQueryExecIT.java | 27 +++-- .../phoenix/end2end/ParallelIteratorsIT.java| 2 +- .../phoenix/end2end/StatsCollectorIT.java | 2 +- .../phoenix/end2end/index/SaltedIndexIT.java| 2 +- .../phoenix/mapreduce/CsvBulkLoadToolIT.java| 22 +++- .../coprocessor/MetaDataEndpointImpl.java | 105 +++-- .../UngroupedAggregateRegionObserver.java | 16 ++- .../org/apache/phoenix/query/QueryServices.java | 8 +- .../phoenix/query/QueryServicesOptions.java | 28 +++-- .../apache/phoenix/schema/MetaDataClient.java | 2 +- .../schema/stat/StatisticsCollector.java| 27 +++-- .../phoenix/schema/stat/StatisticsScanner.java | 8 +- .../phoenix/schema/stat/StatisticsTable.java| 90 +++ .../phoenix/schema/stat/StatisticsUtils.java| 115 ++- .../org/apache/phoenix/util/MetaDataUtil.java | 12 ++ .../java/org/apache/phoenix/query/BaseTest.java | 2 +- .../phoenix/query/QueryServicesTestImpl.java| 8 +- .../java/org/apache/phoenix/util/TestUtil.java | 13 +++ 20 files changed, 285 insertions(+), 239 deletions(-) -- http://git-wip-us.apache.org/repos/asf/phoenix/blob/7dbd3503/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseTenantSpecificTablesIT.java -- diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseTenantSpecificTablesIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseTenantSpecificTablesIT.java index bcae7ed..b8fa035 100644 --- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseTenantSpecificTablesIT.java +++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseTenantSpecificTablesIT.java @@ -84,7 +84,7 @@ public abstract class BaseTenantSpecificTablesIT extends BaseClientManagedTimeIT public static void doSetup() throws Exception { MapString,String props = Maps.newHashMapWithExpectedSize(3); // Must update config before starting server -props.put(QueryServices.HISTOGRAM_BYTE_DEPTH_ATTRIB, Long.toString(20l)); +props.put(QueryServices.STATS_GUIDEPOST_WIDTH_BYTES_ATTRIB, Long.toString(20l)); setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator())); PHOENIX_JDBC_TENANT_SPECIFIC_URL = getUrl() + ';' + TENANT_ID_ATTRIB + '=' + TENANT_ID; PHOENIX_JDBC_TENANT_SPECIFIC_URL2 = getUrl() + ';' + TENANT_ID_ATTRIB + '=' + TENANT_ID2; http://git-wip-us.apache.org/repos/asf/phoenix/blob/7dbd3503/phoenix-core/src/it/java/org/apache/phoenix/end2end/KeyOnlyIT.java -- diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/KeyOnlyIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/KeyOnlyIT.java index 4dee5d8..9b26c2e 100644 --- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/KeyOnlyIT.java +++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/KeyOnlyIT.java @@ -52,7 +52,7 @@ public class KeyOnlyIT extends BaseClientManagedTimeIT { public static void 
doSetup() throws Exception { Map<String,String> props = Maps.newHashMapWithExpectedSize(3); // Must update config before starting server -props.put(QueryServices.HISTOGRAM_BYTE_DEPTH_ATTRIB, Long.toString(20l)); +props.put(QueryServices.STATS_GUIDEPOST_WIDTH_BYTES_ATTRIB, Long.toString(20)); setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator())); } @Test @@ -60,11 +60,14 @@ public class KeyOnlyIT extends BaseClientManagedTimeIT { long ts = nextTimestamp(); ensureTableCreated(getUrl(),KEYONLY_NAME,null, ts); initTableValues(ts+1); -Properties props = new Properties(); +Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); +props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts+30)); +Connection conn3 = DriverManager.getConnection(getUrl(), props); +
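Most of the visible test hunks above are a rename of the stats sizing knob: QueryServices.HISTOGRAM_BYTE_DEPTH_ATTRIB becomes STATS_GUIDEPOST_WIDTH_BYTES_ATTRIB, the number of bytes between collected guideposts. A condensed sketch of the updated test setup follows, mirroring the doSetup() hunks; the @BeforeClass annotation is assumed here, and the 20-byte width is simply the tests' deliberately tiny value (presumably so guideposts appear even on very small test tables), not a recommended setting.

// Condensed from the doSetup() hunks above; setUpTestDriver, ReadOnlyProps, and the
// 20-byte guidepost width come straight from the diff, the @BeforeClass is assumed.
@BeforeClass
public static void doSetup() throws Exception {
    Map<String, String> props = Maps.newHashMapWithExpectedSize(3);
    // Must update config before starting the mini-cluster server.
    props.put(QueryServices.STATS_GUIDEPOST_WIDTH_BYTES_ATTRIB, Long.toString(20));
    setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator()));
}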
Build failed in Jenkins: Phoenix | Master #406
See https://builds.apache.org/job/Phoenix-master/406/changes Changes: [jtaylor] PHOENIX-1321 Cleanup setting of timestamps when collecting and using stats -- [...truncated 512 lines...] Running org.apache.phoenix.end2end.GroupByCaseIT Tests run: 3, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 1.871 sec - in org.apache.phoenix.end2end.GroupByCaseIT Running org.apache.phoenix.end2end.CustomEntityDataIT Tests run: 3, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 1.93 sec - in org.apache.phoenix.end2end.CustomEntityDataIT Running org.apache.phoenix.end2end.ClientTimeArithmeticQueryIT Tests run: 119, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 77.772 sec - in org.apache.phoenix.end2end.ScanQueryIT Running org.apache.phoenix.end2end.ColumnProjectionOptimizationIT Tests run: 91, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 66.716 sec - in org.apache.phoenix.end2end.GroupByIT Running org.apache.phoenix.end2end.NotQueryIT Tests run: 3, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 8.655 sec - in org.apache.phoenix.end2end.ColumnProjectionOptimizationIT Running org.apache.phoenix.end2end.QueryDatabaseMetaDataIT Tests run: 63, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 44.158 sec - in org.apache.phoenix.end2end.CaseStatementIT Running org.apache.phoenix.end2end.UpsertValuesIT Tests run: 15, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 32.377 sec - in org.apache.phoenix.end2end.QueryDatabaseMetaDataIT Running org.apache.phoenix.end2end.RowValueConstructorIT Tests run: 12, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 16.381 sec - in org.apache.phoenix.end2end.UpsertValuesIT Running org.apache.phoenix.end2end.InMemoryOrderByIT Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 2.972 sec - in org.apache.phoenix.end2end.InMemoryOrderByIT Running org.apache.phoenix.end2end.TopNIT Tests run: 4, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 3.639 sec - in org.apache.phoenix.end2end.TopNIT Running org.apache.phoenix.end2end.ArrayIT Tests run: 182, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 164.284 sec - in org.apache.phoenix.end2end.QueryIT Tests run: 77, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 53.877 sec - in org.apache.phoenix.end2end.NotQueryIT Tests run: 34, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 24.349 sec - in org.apache.phoenix.end2end.RowValueConstructorIT Tests run: 48, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 22.557 sec - in org.apache.phoenix.end2end.ArrayIT Tests run: 203, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 135.717 sec - in org.apache.phoenix.end2end.ClientTimeArithmeticQueryIT Results : Failed tests: TenantSpecificTablesDMLIT.testBasicUpsertSelect2:147 expected:3 but was:1 Tests run: 1266, Failures: 1, Errors: 0, Skipped: 8 [INFO] [INFO] --- maven-failsafe-plugin:2.17:integration-test (HBaseManagedTimeTests) @ phoenix-core --- [INFO] Failsafe report directory: https://builds.apache.org/job/Phoenix-master/ws/phoenix-core/target/failsafe-reports [INFO] parallel='none', perCoreThreadCount=true, threadCount=0, useUnlimitedThreads=false, threadCountSuites=0, threadCountClasses=0, threadCountMethods=0, parallelOptimized=true --- T E S T S --- --- T E S T S --- Running org.apache.phoenix.end2end.SkipScanQueryIT Running org.apache.phoenix.end2end.HashJoinIT Running org.apache.phoenix.trace.PhoenixTraceReaderIT Running org.apache.phoenix.trace.PhoenixTracingEndToEndIT Running org.apache.phoenix.trace.PhoenixTableMetricsWriterIT Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 1.392 
sec - in org.apache.phoenix.trace.PhoenixTraceReaderIT Running org.apache.phoenix.end2end.EncodeFunctionIT Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 1.082 sec - in org.apache.phoenix.trace.PhoenixTableMetricsWriterIT Running org.apache.phoenix.end2end.SortOrderFIT Tests run: 6, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 7.604 sec - in org.apache.phoenix.end2end.EncodeFunctionIT Tests run: 30, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 7.067 sec - in org.apache.phoenix.end2end.SortOrderFIT Running org.apache.phoenix.end2end.ArithmeticQueryIT Running org.apache.phoenix.end2end.StatementHintsIT Tests run: 8, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 12.792 sec - in org.apache.phoenix.end2end.SkipScanQueryIT Running org.apache.phoenix.end2end.ViewIT Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 2.445 sec - in org.apache.phoenix.end2end.StatementHintsIT Running org.apache.phoenix.end2end.ConvertTimezoneFunctionIT Tests run: 6, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 2.967 sec - in org.apache.phoenix.end2end.ConvertTimezoneFunctionIT Running
Build failed in Jenkins: Phoenix | 4.0 #360
See https://builds.apache.org/job/Phoenix-4.0/360/changes Changes: [jtaylor] PHOENIX-1321 Cleanup setting of timestamps when collecting and using stats -- [...truncated 527 lines...] Running org.apache.phoenix.end2end.InMemoryOrderByIT Tests run: 10, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 8.491 sec - in org.apache.phoenix.end2end.DistinctCountIT Running org.apache.phoenix.end2end.SkipRangeParallelIteratorRegionSplitterIT Tests run: 8, Failures: 0, Errors: 0, Skipped: 8, Time elapsed: 0.122 sec - in org.apache.phoenix.end2end.SkipRangeParallelIteratorRegionSplitterIT Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 2.767 sec - in org.apache.phoenix.end2end.InMemoryOrderByIT Tests run: 203, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 133.193 sec - in org.apache.phoenix.end2end.ClientTimeArithmeticQueryIT Tests run: 77, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 51.633 sec - in org.apache.phoenix.end2end.NotQueryIT Tests run: 182, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 163.613 sec - in org.apache.phoenix.end2end.QueryIT Results : Failed tests: TenantSpecificTablesDMLIT.testBasicUpsertSelect2:147 expected:3 but was:1 Tests run: 1266, Failures: 1, Errors: 0, Skipped: 8 [INFO] [INFO] --- maven-failsafe-plugin:2.17:integration-test (HBaseManagedTimeTests) @ phoenix-core --- [INFO] Failsafe report directory: https://builds.apache.org/job/Phoenix-4.0/ws/phoenix-core/target/failsafe-reports [INFO] parallel='none', perCoreThreadCount=true, threadCount=0, useUnlimitedThreads=false, threadCountSuites=0, threadCountClasses=0, threadCountMethods=0, parallelOptimized=true --- T E S T S --- --- T E S T S --- Running org.apache.phoenix.trace.PhoenixTraceReaderIT Running org.apache.phoenix.end2end.SpillableGroupByIT Running org.apache.phoenix.end2end.EncodeFunctionIT Running org.apache.phoenix.trace.PhoenixTableMetricsWriterIT Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 1.697 sec - in org.apache.phoenix.trace.PhoenixTraceReaderIT Running org.apache.phoenix.trace.PhoenixTracingEndToEndIT Running org.apache.phoenix.end2end.CSVCommonsLoaderIT Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 1.465 sec - in org.apache.phoenix.trace.PhoenixTableMetricsWriterIT Running org.apache.phoenix.end2end.ParallelIteratorsIT Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 6.34 sec - in org.apache.phoenix.end2end.SpillableGroupByIT Running org.apache.phoenix.end2end.SkipScanAfterManualSplitIT Tests run: 6, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 9.492 sec - in org.apache.phoenix.end2end.EncodeFunctionIT Running org.apache.phoenix.end2end.TenantSpecificViewIndexSaltedIT Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 8.162 sec - in org.apache.phoenix.end2end.ParallelIteratorsIT Running org.apache.phoenix.end2end.EvaluationOfORIT Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 2.592 sec - in org.apache.phoenix.end2end.EvaluationOfORIT Running org.apache.phoenix.end2end.index.SaltedIndexIT Tests run: 14, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 19.307 sec - in org.apache.phoenix.end2end.CSVCommonsLoaderIT Running org.apache.phoenix.end2end.index.DropViewIT Tests run: 4, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 12.488 sec - in org.apache.phoenix.end2end.TenantSpecificViewIndexSaltedIT Running org.apache.phoenix.end2end.index.ViewIndexIT Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 4.025 sec - in org.apache.phoenix.end2end.index.DropViewIT Running 
org.apache.phoenix.end2end.index.LocalIndexIT Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 4.004 sec - in org.apache.phoenix.end2end.index.ViewIndexIT Running org.apache.phoenix.end2end.index.IndexMetadataIT Tests run: 3, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 18.665 sec - in org.apache.phoenix.end2end.index.SaltedIndexIT Running org.apache.phoenix.end2end.index.MutableIndexIT Tests run: 3, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 28.505 sec - in org.apache.phoenix.end2end.SkipScanAfterManualSplitIT Running org.apache.phoenix.end2end.index.ImmutableIndexIT Tests run: 4, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 18.88 sec - in org.apache.phoenix.end2end.index.IndexMetadataIT Running org.apache.phoenix.end2end.BinaryRowKeyIT Tests run: 3, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 3.346 sec - in org.apache.phoenix.end2end.BinaryRowKeyIT Running org.apache.phoenix.end2end.DynamicFamilyIT Tests run: 4, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 4.821 sec - in org.apache.phoenix.end2end.DynamicFamilyIT Running org.apache.phoenix.end2end.AlterTableIT Tests run: 13, Failures: 0, Errors: 0,