Repository: phoenix Updated Branches: refs/heads/master 1fb6b1b79 -> cbb38c273
PHOENIX-2724 Addendum: Prevent queries from executing serially in spite of the hint Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/cbb38c27 Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/cbb38c27 Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/cbb38c27 Branch: refs/heads/master Commit: cbb38c2730c66701a925f5bbe667564116dc13ce Parents: 1fb6b1b Author: Samarth <[email protected]> Authored: Thu Apr 28 14:05:05 2016 -0700 Committer: Samarth <[email protected]> Committed: Thu Apr 28 14:05:05 2016 -0700 ---------------------------------------------------------------------- .../apache/phoenix/execute/AggregatePlan.java | 13 +++++++++++- .../org/apache/phoenix/execute/ScanPlan.java | 21 ++++++++++---------- .../java/org/apache/phoenix/util/ScanUtil.java | 15 ++++++++++++++ 3 files changed, 37 insertions(+), 12 deletions(-) ---------------------------------------------------------------------- http://git-wip-us.apache.org/repos/asf/phoenix/blob/cbb38c27/phoenix-core/src/main/java/org/apache/phoenix/execute/AggregatePlan.java ---------------------------------------------------------------------- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/AggregatePlan.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/AggregatePlan.java index b125ecc..b60cd7e 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/execute/AggregatePlan.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/AggregatePlan.java @@ -59,10 +59,13 @@ import org.apache.phoenix.parse.HintNode; import org.apache.phoenix.query.KeyRange; import org.apache.phoenix.query.QueryServices; import org.apache.phoenix.query.QueryServicesOptions; +import org.apache.phoenix.schema.PTable; import org.apache.phoenix.schema.PTable.IndexType; import org.apache.phoenix.schema.TableRef; import org.apache.phoenix.schema.types.PInteger; import org.apache.phoenix.util.ScanUtil;
+import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * @@ -76,6 +79,8 @@ public class AggregatePlan extends BaseQueryPlan { private final Expression having; private List<KeyRange> splits; private List<List<Scan>> scans; + private static final Logger logger = LoggerFactory.getLogger(AggregatePlan.class); + public AggregatePlan(StatementContext context, FilterableStatement statement, TableRef table, RowProjector projector, Integer limit, Integer offset, OrderBy orderBy, @@ -199,7 +204,13 @@ public class AggregatePlan extends BaseQueryPlan { PInteger.INSTANCE.toBytes(limit + (offset == null ? 0 : offset))); } } - BaseResultIterators iterators = statement.getHint().hasHint(HintNode.Hint.SERIAL) + PTable table = tableRef.getTable(); + boolean hasSerialHint = statement.getHint().hasHint(HintNode.Hint.SERIAL); + boolean canBeExecutedSerially = ScanUtil.canQueryBeExecutedSerially(table, orderBy, context); + if (hasSerialHint && !canBeExecutedSerially) { + logger.warn("This query cannot be executed serially. Ignoring the hint"); + } + BaseResultIterators iterators = hasSerialHint && canBeExecutedSerially ?
new SerialIterators(this, null, null, wrapParallelIteratorFactory(), scanGrouper) : new ParallelIterators(this, null, wrapParallelIteratorFactory()); http://git-wip-us.apache.org/repos/asf/phoenix/blob/cbb38c27/phoenix-core/src/main/java/org/apache/phoenix/execute/ScanPlan.java ---------------------------------------------------------------------- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/ScanPlan.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/ScanPlan.java index c5dabfe..980db52 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/execute/ScanPlan.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/ScanPlan.java @@ -102,9 +102,18 @@ public class ScanPlan extends BaseQueryPlan { private static boolean isSerial(StatementContext context, FilterableStatement statement, TableRef tableRef, OrderBy orderBy, Integer limit, Integer offset, boolean allowPageFilter) throws SQLException { - if (statement.getHint().hasHint(HintNode.Hint.SERIAL)) { + PTable table = tableRef.getTable(); + boolean hasSerialHint = statement.getHint().hasHint(HintNode.Hint.SERIAL); + boolean canBeExecutedSerially = ScanUtil.canQueryBeExecutedSerially(table, orderBy, context); + if (!canBeExecutedSerially) { + if (hasSerialHint) { + logger.warn("This query cannot be executed serially. Ignoring the hint"); + } + return false; + } else if (hasSerialHint) { return true; } + Scan scan = context.getScan(); /* * If a limit is provided and we have no filter, run the scan serially when we estimate that @@ -115,16 +124,6 @@ public class ScanPlan extends BaseQueryPlan { if (perScanLimit == null || scan.getFilter() != null) { return false; } - PTable table = tableRef.getTable(); - /* - * For salted or local index tables, if rows are requested in a row key order, then we - * cannot execute a query serially. We need to be able to do a merge sort across all scans - * which isn't possible with SerialIterators. For other kinds of tables though we are ok - * since SerialIterators execute scans in the correct order. - */ - if ((table.getBucketNum() != null || table.getIndexType() == IndexType.LOCAL) && ScanUtil.shouldRowsBeInRowKeyOrder(orderBy, context)) { - return false; - } GuidePostsInfo gpsInfo = table.getTableStats().getGuidePosts().get(SchemaUtil.getEmptyColumnFamily(table)); long estRowSize = SchemaUtil.estimateRowSize(table); long estRegionSize;
http://git-wip-us.apache.org/repos/asf/phoenix/blob/cbb38c27/phoenix-core/src/main/java/org/apache/phoenix/util/ScanUtil.java ---------------------------------------------------------------------- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/ScanUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/ScanUtil.java index 46589b9..696f051 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/util/ScanUtil.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/util/ScanUtil.java @@ -58,6 +58,8 @@ import org.apache.phoenix.query.QueryServices; import org.apache.phoenix.query.QueryServicesOptions; import org.apache.phoenix.schema.IllegalDataException; import org.apache.phoenix.schema.PName; +import org.apache.phoenix.schema.PTable; +import org.apache.phoenix.schema.PTable.IndexType; import org.apache.phoenix.schema.RowKeySchema; import org.apache.phoenix.schema.SortOrder; import org.apache.phoenix.schema.ValueSchema.Field; @@ -807,5 +809,18 @@ public class ScanUtil { public static void addOffsetAttribute(Scan scan, Integer offset) { scan.setAttribute(BaseScannerRegionObserver.SCAN_OFFSET, Bytes.toBytes(offset)); } + + public static final boolean canQueryBeExecutedSerially(PTable table, OrderBy orderBy, StatementContext context) { + /* + * For salted or local index tables, if rows are requested in a row key order, then we + * cannot execute a query serially. We need to be able to do a merge sort across all scans + * which isn't possible with SerialIterators. For other kinds of tables though we are ok + * since SerialIterators execute scans in the correct order. + */ + if ((table.getBucketNum() != null || table.getIndexType() == IndexType.LOCAL) && shouldRowsBeInRowKeyOrder(orderBy, context)) { + return false; + } + return true; + } } \ No newline at end of file
