This is an automated email from the ASF dual-hosted git repository.
karan pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/druid.git
The following commit(s) were added to refs/heads/master by this push:
new 9e617373a0 Handle dimensionless group by queries with partitioning
9e617373a0 is described below
commit 9e617373a0cd2467e8565a54335849c9f48b39d0
Author: Laksh Singla <[email protected]>
AuthorDate: Fri Jul 7 21:51:47 2023 +0530
Handle dimensionless group by queries with partitioning
---
.../msq/querykit/groupby/GroupByQueryKit.java | 10 +++-
.../org/apache/druid/msq/exec/MSQInsertTest.java | 61 ++++++++++++++++++++++
2 files changed, 69 insertions(+), 2 deletions(-)
diff --git
a/extensions-core/multi-stage-query/src/main/java/org/apache/druid/msq/querykit/groupby/GroupByQueryKit.java
b/extensions-core/multi-stage-query/src/main/java/org/apache/druid/msq/querykit/groupby/GroupByQueryKit.java
index 9f83f8f210..3b4ea36eb0 100644
---
a/extensions-core/multi-stage-query/src/main/java/org/apache/druid/msq/querykit/groupby/GroupByQueryKit.java
+++
b/extensions-core/multi-stage-query/src/main/java/org/apache/druid/msq/querykit/groupby/GroupByQueryKit.java
@@ -97,8 +97,9 @@ public class GroupByQueryKit implements QueryKit<GroupByQuery>
final Granularity segmentGranularity =
QueryKitUtils.getSegmentGranularityFromContext(jsonMapper,
queryToRun.getContext());
final RowSignature intermediateSignature =
computeIntermediateSignature(queryToRun);
+ final ClusterBy resultClusterByWithoutGranularity =
computeClusterByForResults(queryToRun);
final ClusterBy resultClusterBy =
-
QueryKitUtils.clusterByWithSegmentGranularity(computeClusterByForResults(queryToRun),
segmentGranularity);
+
QueryKitUtils.clusterByWithSegmentGranularity(resultClusterByWithoutGranularity,
segmentGranularity);
final RowSignature resultSignature =
QueryKitUtils.sortableSignature(
QueryKitUtils.signatureWithSegmentGranularity(computeResultSignature(queryToRun),
segmentGranularity),
@@ -114,7 +115,12 @@ public class GroupByQueryKit implements
QueryKit<GroupByQuery>
final ShuffleSpecFactory shuffleSpecFactoryPreAggregation;
final ShuffleSpecFactory shuffleSpecFactoryPostAggregation;
- if (intermediateClusterBy.getColumns().isEmpty()) {
+ // There can be a situation where intermediateClusterBy is empty while
the result is non-empty:
+ // when we have PARTITIONED BY on anything except ALL but no grouping
dimension
+ // (i.e. no GROUP BY clause).
+ // In such queries, __time is generated using either an aggregator (e.g.
sum(metric) as __time) or a
+ // post-aggregator (e.g. TIMESTAMP '2000-01-01' as __time)
+ if (intermediateClusterBy.getColumns().isEmpty() &&
resultClusterBy.isEmpty()) {
// Ignore shuffleSpecFactory, since we know only a single partition will
come out, and we can save some effort.
shuffleSpecFactoryPreAggregation =
ShuffleSpecFactories.singlePartition();
shuffleSpecFactoryPostAggregation =
ShuffleSpecFactories.singlePartition();
diff --git
a/extensions-core/multi-stage-query/src/test/java/org/apache/druid/msq/exec/MSQInsertTest.java
b/extensions-core/multi-stage-query/src/test/java/org/apache/druid/msq/exec/MSQInsertTest.java
index fde14fa301..63170e91d4 100644
---
a/extensions-core/multi-stage-query/src/test/java/org/apache/druid/msq/exec/MSQInsertTest.java
+++
b/extensions-core/multi-stage-query/src/test/java/org/apache/druid/msq/exec/MSQInsertTest.java
@@ -406,6 +406,67 @@ public class MSQInsertTest extends MSQTestBase
}
+ @Test
+ public void testInsertOnFoo1WithTimeAggregator()
+ {
+ RowSignature rowSignature = RowSignature.builder()
+ .add("__time", ColumnType.LONG)
+ .build();
+
+ testIngestQuery().setSql(
+ "INSERT INTO foo1 "
+ + "SELECT MILLIS_TO_TIMESTAMP((SUM(CAST(\"m1\" AS
BIGINT)))) AS __time "
+ + "FROM foo "
+ + "PARTITIONED BY DAY"
+ )
+ .setExpectedDataSource("foo1")
+ .setExpectedRowSignature(rowSignature)
+ .setQueryContext(context)
+ .setExpectedSegment(
+ ImmutableSet.of(
+ SegmentId.of("foo1",
Intervals.of("1970-01-01/P1D"), "test", 0)
+ )
+ )
+ .setExpectedResultRows(
+ ImmutableList.of(
+ new Object[]{21L}
+ )
+ )
+ .verifyResults();
+
+ }
+
+ @Test
+ public void testInsertOnFoo1WithTimePostAggregator()
+ {
+ RowSignature rowSignature = RowSignature.builder()
+ .add("__time", ColumnType.LONG)
+ .add("sum_m1", ColumnType.DOUBLE)
+ .build();
+
+ testIngestQuery().setSql(
+ "INSERT INTO foo1 "
+ + "SELECT DATE_TRUNC('DAY', TIMESTAMP '2000-01-01' -
INTERVAL '1'DAY) AS __time, SUM(m1) AS sum_m1 "
+ + "FROM foo "
+ + "PARTITIONED BY DAY"
+ )
+ .setExpectedDataSource("foo1")
+ .setExpectedRowSignature(rowSignature)
+ .setQueryContext(context)
+ .setExpectedSegment(
+ ImmutableSet.of(
+ SegmentId.of("foo1",
Intervals.of("1999-12-31T/P1D"), "test", 0)
+ )
+ )
+ .setExpectedResultRows(
+ ImmutableList.of(
+ new Object[]{946598400000L, 21.0}
+ )
+ )
+ .verifyResults();
+
+ }
+
@Test
public void testInsertOnFoo1WithTimeFunctionWithSequential()
{
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]