This is an automated email from the ASF dual-hosted git repository.
jonwei pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/druid.git
The following commit(s) were added to refs/heads/master by this push:
new 574b062 Cluster wide default query context setting (#10208)
574b062 is described below
commit 574b062f1f6f1cf0637d99d4ea540a95971c7489
Author: Maytas Monsereenusorn <[email protected]>
AuthorDate: Wed Jul 29 15:19:18 2020 -0700
Cluster wide default query context setting (#10208)
* Cluster wide default query context setting
* Cluster wide default query context setting
* Cluster wide default query context setting
* add docs
* fix docs
* update props
* fix checkstyle
* fix checkstyle
* fix checkstyle
* update docs
* address comments
* fix checkstyle
* fix checkstyle
* fix checkstyle
* fix checkstyle
* fix checkstyle
* fix NPE
---
.../benchmark/GroupByTypeInterfaceBenchmark.java | 2 -
.../query/CachingClusteredClientBenchmark.java | 2 -
.../druid/benchmark/query/GroupByBenchmark.java | 2 -
docs/configuration/index.md | 37 +++++--
docs/querying/query-context.md | 7 +-
.../druid/segment/MapVirtualColumnGroupByTest.java | 2 -
.../{QueryConfig.java => DefaultQueryConfig.java} | 45 ++++----
.../java/org/apache/druid/query/QueryContexts.java | 11 ++
.../epinephelinae/GroupByQueryEngineV2.java | 10 +-
.../epinephelinae/vector/VectorGroupByEngine.java | 7 +-
.../query/groupby/strategy/GroupByStrategyV2.java | 7 +-
.../query/timeseries/TimeseriesQueryEngine.java | 16 +--
.../apache/druid/query/DefaultQueryConfigTest.java | 82 ++++++++++++++
...GroupByLimitPushDownInsufficientBufferTest.java | 6 -
.../GroupByLimitPushDownMultiNodeMergeTest.java | 6 -
.../query/groupby/GroupByMultiSegmentTest.java | 2 -
.../query/groupby/GroupByQueryMergeBufferTest.java | 2 -
.../groupby/GroupByQueryRunnerFailureTest.java | 2 -
.../query/groupby/GroupByQueryRunnerTest.java | 2 -
.../query/groupby/NestedQueryPushDownTest.java | 6 -
.../apache/druid/query/search/QueryConfigTest.java | 80 --------------
.../apache/druid/guice/QueryToolChestModule.java | 4 +-
.../org/apache/druid/server/QueryLifecycle.java | 14 ++-
.../apache/druid/server/QueryLifecycleFactory.java | 8 +-
.../org/apache/druid/server/QueryResourceTest.java | 122 ++++++++++++++++++++-
.../druid/sql/calcite/util/CalciteTests.java | 5 +-
26 files changed, 307 insertions(+), 182 deletions(-)
diff --git
a/benchmarks/src/test/java/org/apache/druid/benchmark/GroupByTypeInterfaceBenchmark.java
b/benchmarks/src/test/java/org/apache/druid/benchmark/GroupByTypeInterfaceBenchmark.java
index d12ff87..d05da5e 100644
---
a/benchmarks/src/test/java/org/apache/druid/benchmark/GroupByTypeInterfaceBenchmark.java
+++
b/benchmarks/src/test/java/org/apache/druid/benchmark/GroupByTypeInterfaceBenchmark.java
@@ -40,7 +40,6 @@ import org.apache.druid.offheap.OffheapBufferGenerator;
import org.apache.druid.query.DruidProcessingConfig;
import org.apache.druid.query.FinalizeResultsQueryRunner;
import org.apache.druid.query.Query;
-import org.apache.druid.query.QueryConfig;
import org.apache.druid.query.QueryPlus;
import org.apache.druid.query.QueryRunner;
import org.apache.druid.query.QueryRunnerFactory;
@@ -399,7 +398,6 @@ public class GroupByTypeInterfaceBenchmark
new GroupByStrategyV2(
druidProcessingConfig,
configSupplier,
- QueryConfig::new,
bufferPool,
mergePool,
new ObjectMapper(new SmileFactory()),
diff --git
a/benchmarks/src/test/java/org/apache/druid/benchmark/query/CachingClusteredClientBenchmark.java
b/benchmarks/src/test/java/org/apache/druid/benchmark/query/CachingClusteredClientBenchmark.java
index 877aca5..6c53abd 100644
---
a/benchmarks/src/test/java/org/apache/druid/benchmark/query/CachingClusteredClientBenchmark.java
+++
b/benchmarks/src/test/java/org/apache/druid/benchmark/query/CachingClusteredClientBenchmark.java
@@ -61,7 +61,6 @@ import org.apache.druid.query.Druids;
import org.apache.druid.query.FinalizeResultsQueryRunner;
import org.apache.druid.query.FluentQueryRunnerBuilder;
import org.apache.druid.query.Query;
-import org.apache.druid.query.QueryConfig;
import org.apache.druid.query.QueryContexts;
import org.apache.druid.query.QueryPlus;
import org.apache.druid.query.QueryRunner;
@@ -373,7 +372,6 @@ public class CachingClusteredClientBenchmark
new GroupByStrategyV2(
processingConfig,
configSupplier,
- QueryConfig::new,
bufferPool,
mergeBufferPool,
mapper,
diff --git
a/benchmarks/src/test/java/org/apache/druid/benchmark/query/GroupByBenchmark.java
b/benchmarks/src/test/java/org/apache/druid/benchmark/query/GroupByBenchmark.java
index 5721415..67d6273 100644
---
a/benchmarks/src/test/java/org/apache/druid/benchmark/query/GroupByBenchmark.java
+++
b/benchmarks/src/test/java/org/apache/druid/benchmark/query/GroupByBenchmark.java
@@ -44,7 +44,6 @@ import org.apache.druid.offheap.OffheapBufferGenerator;
import org.apache.druid.query.DruidProcessingConfig;
import org.apache.druid.query.FinalizeResultsQueryRunner;
import org.apache.druid.query.Query;
-import org.apache.druid.query.QueryConfig;
import org.apache.druid.query.QueryPlus;
import org.apache.druid.query.QueryRunner;
import org.apache.druid.query.QueryRunnerFactory;
@@ -562,7 +561,6 @@ public class GroupByBenchmark
new GroupByStrategyV2(
druidProcessingConfig,
configSupplier,
- QueryConfig::new,
bufferPool,
mergePool,
new ObjectMapper(new SmileFactory()),
diff --git a/docs/configuration/index.md b/docs/configuration/index.md
index f380b96..01480be 100644
--- a/docs/configuration/index.md
+++ b/docs/configuration/index.md
@@ -1769,14 +1769,35 @@ If there is an L1 miss and L2 hit, it will also
populate L1.
This section describes configurations that control behavior of Druid's query
types, applicable to Broker, Historical, and MiddleManager processes.
-### Query vectorization config
-
-The following configurations are to set the default behavior for query
vectorization.
-
-|Property|Description|Default|
-|--------|-----------|-------|
-|`druid.query.vectorize`|See [Vectorization
parameters](../querying/query-context.html#vectorization-parameters) for
details. This value can be overridden by `vectorize` in the query
contexts.|`true`|
-|`druid.query.vectorSize`|See [Vectorization
parameters](../querying/query-context.html#vectorization-parameters) for
details. This value can be overridden by `vectorSize` in the query
contexts.|`512`|
+### Overriding default query context values
+
+Any [Query Context General
Parameter](../querying/query-context.html#general-parameters) default value can
be
+overridden by setting a runtime property in the format of
`druid.query.default.context.{query_context_key}`.
+The `druid.query.default.context.{query_context_key}` runtime property prefix
applies to all current and future
+query context keys, the same as how a query context parameter passed with the
query works. Note that the runtime property
+value can be overridden if a value for the same key is explicitly specified in
the query contexts.
+
+The precedence chain for query context values is as follows:
+
+hard-coded default value in Druid code <- runtime property not prefixed with
`druid.query.default.context`
+<- runtime property prefixed with `druid.query.default.context` <- context
parameter in the query
+
+Note that not all query context keys have a runtime property not prefixed with
`druid.query.default.context` that can
+override the hard-coded default value. For example, `maxQueuedBytes` has
`druid.broker.http.maxQueuedBytes`
+but `joinFilterRewriteMaxSize` does not. Hence, the only way of overriding the
`joinFilterRewriteMaxSize` hard-coded default
+value is with the runtime property
`druid.query.default.context.joinFilterRewriteMaxSize`.
+
+To further elaborate on the previous example:
+
+If neither `druid.broker.http.maxQueuedBytes` nor
`druid.query.default.context.maxQueuedBytes` is set and
+the query does not have `maxQueuedBytes` in the context, then the hard-coded
value in Druid code is used.
+If the runtime properties only contain `druid.broker.http.maxQueuedBytes=x` and
the query does not have `maxQueuedBytes` in the
+context, then the value of the property, `x`, is used. However, if the query
does have `maxQueuedBytes` in the context,
+then that value is used instead.
+If the runtime properties only contain
`druid.query.default.context.maxQueuedBytes=y` OR the runtime properties contain
both
+`druid.broker.http.maxQueuedBytes=x` and
`druid.query.default.context.maxQueuedBytes=y`, then the value of
+`druid.query.default.context.maxQueuedBytes`, `y`, is used (given that the
query does not have `maxQueuedBytes` in the
+context). If the query does have `maxQueuedBytes` in the context, then that
value is used instead.
### TopN query config
diff --git a/docs/querying/query-context.md b/docs/querying/query-context.md
index 89860a6..d14eeb7 100644
--- a/docs/querying/query-context.md
+++ b/docs/querying/query-context.md
@@ -32,6 +32,9 @@ the following ways:
HTTP POST API, or as properties to the JDBC connection.
- For [native queries](querying.md), context parameters are provided as a JSON
object named `context`.
+Note that setting a query context parameter will override both the default
value and the runtime property value in the format of
+`druid.query.default.context.{property_key}` (if set).
+
These parameters apply to all query types.
|property |default | description
|
@@ -100,5 +103,5 @@ vectorization. These query types will ignore the
"vectorize" parameter even if i
|property|default| description|
|--------|-------|------------|
-|vectorize|`true`|Enables or disables vectorized query execution. Possible
values are `false` (disabled), `true` (enabled if possible, disabled otherwise,
on a per-segment basis), and `force` (enabled, and groupBy or timeseries
queries that cannot be vectorized will fail). The `"force"` setting is meant to
aid in testing, and is not generally useful in production (since real-time
segments can never be processed with vectorized execution, any queries on
real-time data will fail). This wil [...]
-|vectorSize|`512`|Sets the row batching size for a particular query. This will
override `druid.query.vectorSize` if it's set.|
+|vectorize|`true`|Enables or disables vectorized query execution. Possible
values are `false` (disabled), `true` (enabled if possible, disabled otherwise,
on a per-segment basis), and `force` (enabled, and groupBy or timeseries
queries that cannot be vectorized will fail). The `"force"` setting is meant to
aid in testing, and is not generally useful in production (since real-time
segments can never be processed with vectorized execution, any queries on
real-time data will fail). This wil [...]
+|vectorSize|`512`|Sets the row batching size for a particular query. This will
override `druid.query.default.context.vectorSize` if it's set.|
diff --git
a/extensions-contrib/virtual-columns/src/test/java/org/apache/druid/segment/MapVirtualColumnGroupByTest.java
b/extensions-contrib/virtual-columns/src/test/java/org/apache/druid/segment/MapVirtualColumnGroupByTest.java
index e16a906..3e0cda4 100644
---
a/extensions-contrib/virtual-columns/src/test/java/org/apache/druid/segment/MapVirtualColumnGroupByTest.java
+++
b/extensions-contrib/virtual-columns/src/test/java/org/apache/druid/segment/MapVirtualColumnGroupByTest.java
@@ -28,7 +28,6 @@ import org.apache.druid.java.util.common.DateTimes;
import org.apache.druid.java.util.common.Intervals;
import org.apache.druid.java.util.common.granularity.Granularities;
import org.apache.druid.query.DruidProcessingConfig;
-import org.apache.druid.query.QueryConfig;
import org.apache.druid.query.QueryPlus;
import org.apache.druid.query.QueryRunner;
import org.apache.druid.query.QueryRunnerTestHelper;
@@ -100,7 +99,6 @@ public class MapVirtualColumnGroupByTest extends
InitializedNullHandlingTest
}
},
GroupByQueryConfig::new,
- QueryConfig::new,
new StupidPool<>("map-virtual-column-groupby-test", () ->
ByteBuffer.allocate(1024)),
new DefaultBlockingPool<>(() -> ByteBuffer.allocate(1024), 1),
new DefaultObjectMapper(),
diff --git a/processing/src/main/java/org/apache/druid/query/QueryConfig.java
b/processing/src/main/java/org/apache/druid/query/DefaultQueryConfig.java
similarity index 56%
rename from processing/src/main/java/org/apache/druid/query/QueryConfig.java
rename to
processing/src/main/java/org/apache/druid/query/DefaultQueryConfig.java
index b964710..6c585ee 100644
--- a/processing/src/main/java/org/apache/druid/query/QueryConfig.java
+++ b/processing/src/main/java/org/apache/druid/query/DefaultQueryConfig.java
@@ -19,43 +19,48 @@
package org.apache.druid.query;
+import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;
-import org.apache.druid.query.QueryContexts.Vectorize;
-import org.apache.druid.segment.QueryableIndexStorageAdapter;
+import com.google.common.collect.ImmutableMap;
+
+import javax.annotation.Nonnull;
+import java.util.Map;
/**
* A user configuration holder for all query types.
* Any query-specific configurations should go to their own configuration.
- *
* @see org.apache.druid.query.groupby.GroupByQueryConfig
* @see org.apache.druid.query.search.SearchQueryConfig
* @see org.apache.druid.query.topn.TopNQueryConfig
* @see org.apache.druid.query.metadata.SegmentMetadataQueryConfig
* @see org.apache.druid.query.scan.ScanQueryConfig
+ *
*/
-public class QueryConfig
+public class DefaultQueryConfig
{
+ /**
+ * Note that context values should not be directly retrieved from this field
but instead should
+ * be read through {@link QueryContexts}. This field contains context
configs from the runtime properties,
+ * which are then merged with configs passed in the query context. The result
of the merge is subsequently stored in
+ * the query context. The order of precedence in merging of the configs is
as follows:
+ * runtime property values (stored in this class) are overridden by query
context parameters passed in with the query
+ */
@JsonProperty
- private Vectorize vectorize = QueryContexts.DEFAULT_VECTORIZE;
-
- @JsonProperty
- private int vectorSize = QueryableIndexStorageAdapter.DEFAULT_VECTOR_SIZE;
-
- public Vectorize getVectorize()
- {
- return vectorize;
- }
+ private final Map<String, Object> context;
- public int getVectorSize()
+ @Nonnull
+ public Map<String, Object> getContext()
{
- return vectorSize;
+ return context;
}
- public QueryConfig withOverrides(final Query<?> query)
+ @JsonCreator
+ public DefaultQueryConfig(@JsonProperty("context") Map<String, Object>
context)
{
- final QueryConfig newConfig = new QueryConfig();
- newConfig.vectorize = QueryContexts.getVectorize(query, vectorize);
- newConfig.vectorSize = QueryContexts.getVectorSize(query, vectorSize);
- return newConfig;
+ if (context == null) {
+ this.context = ImmutableMap.of();
+ } else {
+ this.context = context;
+ }
}
}
diff --git a/processing/src/main/java/org/apache/druid/query/QueryContexts.java
b/processing/src/main/java/org/apache/druid/query/QueryContexts.java
index b70e8cf..b4af6a5 100644
--- a/processing/src/main/java/org/apache/druid/query/QueryContexts.java
+++ b/processing/src/main/java/org/apache/druid/query/QueryContexts.java
@@ -28,6 +28,7 @@ import org.apache.druid.java.util.common.IAE;
import org.apache.druid.java.util.common.ISE;
import org.apache.druid.java.util.common.Numbers;
import org.apache.druid.java.util.common.StringUtils;
+import org.apache.druid.segment.QueryableIndexStorageAdapter;
import java.util.concurrent.TimeUnit;
@@ -184,11 +185,21 @@ public class QueryContexts
return parseBoolean(query, "serializeDateTimeAsLongInner", defaultValue);
}
+ public static <T> Vectorize getVectorize(Query<T> query)
+ {
+ return getVectorize(query, QueryContexts.DEFAULT_VECTORIZE);
+ }
+
public static <T> Vectorize getVectorize(Query<T> query, Vectorize
defaultValue)
{
return parseEnum(query, VECTORIZE_KEY, Vectorize.class, defaultValue);
}
+ public static <T> int getVectorSize(Query<T> query)
+ {
+ return getVectorSize(query,
QueryableIndexStorageAdapter.DEFAULT_VECTOR_SIZE);
+ }
+
public static <T> int getVectorSize(Query<T> query, int defaultSize)
{
return parseInt(query, VECTOR_SIZE_KEY, defaultSize);
diff --git
a/processing/src/main/java/org/apache/druid/query/groupby/epinephelinae/GroupByQueryEngineV2.java
b/processing/src/main/java/org/apache/druid/query/groupby/epinephelinae/GroupByQueryEngineV2.java
index f19ae25..85ad1dd 100644
---
a/processing/src/main/java/org/apache/druid/query/groupby/epinephelinae/GroupByQueryEngineV2.java
+++
b/processing/src/main/java/org/apache/druid/query/groupby/epinephelinae/GroupByQueryEngineV2.java
@@ -33,7 +33,7 @@ import org.apache.druid.java.util.common.guava.BaseSequence;
import org.apache.druid.java.util.common.guava.Sequence;
import org.apache.druid.java.util.common.logger.Logger;
import org.apache.druid.query.ColumnSelectorPlus;
-import org.apache.druid.query.QueryConfig;
+import org.apache.druid.query.QueryContexts;
import org.apache.druid.query.aggregation.AggregatorAdapters;
import org.apache.druid.query.aggregation.AggregatorFactory;
import org.apache.druid.query.dimension.ColumnSelectorStrategyFactory;
@@ -114,8 +114,7 @@ public class GroupByQueryEngineV2
final GroupByQuery query,
@Nullable final StorageAdapter storageAdapter,
final NonBlockingPool<ByteBuffer> intermediateResultsBufferPool,
- final GroupByQueryConfig querySpecificConfig,
- final QueryConfig queryConfig
+ final GroupByQueryConfig querySpecificConfig
)
{
if (storageAdapter == null) {
@@ -143,7 +142,7 @@ public class GroupByQueryEngineV2
final Filter filter = Filters.convertToCNFFromQueryContext(query,
Filters.toFilter(query.getFilter()));
final Interval interval = Iterables.getOnlyElement(query.getIntervals());
- final boolean doVectorize = queryConfig.getVectorize().shouldVectorize(
+ final boolean doVectorize =
QueryContexts.getVectorize(query).shouldVectorize(
VectorGroupByEngine.canVectorize(query, storageAdapter, filter)
);
@@ -157,8 +156,7 @@ public class GroupByQueryEngineV2
fudgeTimestamp,
filter,
interval,
- querySpecificConfig,
- queryConfig
+ querySpecificConfig
);
} else {
result = processNonVectorized(
diff --git
a/processing/src/main/java/org/apache/druid/query/groupby/epinephelinae/vector/VectorGroupByEngine.java
b/processing/src/main/java/org/apache/druid/query/groupby/epinephelinae/vector/VectorGroupByEngine.java
index 10408fe..99f14ae 100644
---
a/processing/src/main/java/org/apache/druid/query/groupby/epinephelinae/vector/VectorGroupByEngine.java
+++
b/processing/src/main/java/org/apache/druid/query/groupby/epinephelinae/vector/VectorGroupByEngine.java
@@ -26,7 +26,7 @@ import org.apache.druid.java.util.common.ISE;
import org.apache.druid.java.util.common.guava.BaseSequence;
import org.apache.druid.java.util.common.guava.Sequence;
import org.apache.druid.java.util.common.parsers.CloseableIterator;
-import org.apache.druid.query.QueryConfig;
+import org.apache.druid.query.QueryContexts;
import org.apache.druid.query.aggregation.AggregatorAdapters;
import org.apache.druid.query.dimension.DimensionSpec;
import org.apache.druid.query.filter.Filter;
@@ -95,8 +95,7 @@ public class VectorGroupByEngine
@Nullable final DateTime fudgeTimestamp,
@Nullable final Filter filter,
final Interval interval,
- final GroupByQueryConfig config,
- final QueryConfig queryConfig
+ final GroupByQueryConfig config
)
{
if (!canVectorize(query, storageAdapter, filter)) {
@@ -114,7 +113,7 @@ public class VectorGroupByEngine
interval,
query.getVirtualColumns(),
false,
- queryConfig.getVectorSize(),
+ QueryContexts.getVectorSize(query),
null
);
diff --git
a/processing/src/main/java/org/apache/druid/query/groupby/strategy/GroupByStrategyV2.java
b/processing/src/main/java/org/apache/druid/query/groupby/strategy/GroupByStrategyV2.java
index bc4dc1b..455cb7d 100644
---
a/processing/src/main/java/org/apache/druid/query/groupby/strategy/GroupByStrategyV2.java
+++
b/processing/src/main/java/org/apache/druid/query/groupby/strategy/GroupByStrategyV2.java
@@ -42,7 +42,6 @@ import org.apache.druid.query.DataSource;
import org.apache.druid.query.DruidProcessingConfig;
import org.apache.druid.query.InsufficientResourcesException;
import org.apache.druid.query.Query;
-import org.apache.druid.query.QueryConfig;
import org.apache.druid.query.QueryContexts;
import org.apache.druid.query.QueryDataSource;
import org.apache.druid.query.QueryPlus;
@@ -89,7 +88,6 @@ public class GroupByStrategyV2 implements GroupByStrategy
private final DruidProcessingConfig processingConfig;
private final Supplier<GroupByQueryConfig> configSupplier;
- private final Supplier<QueryConfig> queryConfigSupplier;
private final NonBlockingPool<ByteBuffer> bufferPool;
private final BlockingPool<ByteBuffer> mergeBufferPool;
private final ObjectMapper spillMapper;
@@ -99,7 +97,6 @@ public class GroupByStrategyV2 implements GroupByStrategy
public GroupByStrategyV2(
DruidProcessingConfig processingConfig,
Supplier<GroupByQueryConfig> configSupplier,
- Supplier<QueryConfig> queryConfigSupplier,
@Global NonBlockingPool<ByteBuffer> bufferPool,
@Merging BlockingPool<ByteBuffer> mergeBufferPool,
@Smile ObjectMapper spillMapper,
@@ -108,7 +105,6 @@ public class GroupByStrategyV2 implements GroupByStrategy
{
this.processingConfig = processingConfig;
this.configSupplier = configSupplier;
- this.queryConfigSupplier = queryConfigSupplier;
this.bufferPool = bufferPool;
this.mergeBufferPool = mergeBufferPool;
this.spillMapper = spillMapper;
@@ -574,8 +570,7 @@ public class GroupByStrategyV2 implements GroupByStrategy
query,
storageAdapter,
bufferPool,
- configSupplier.get().withOverrides(query),
- queryConfigSupplier.get().withOverrides(query)
+ configSupplier.get().withOverrides(query)
);
}
diff --git
a/processing/src/main/java/org/apache/druid/query/timeseries/TimeseriesQueryEngine.java
b/processing/src/main/java/org/apache/druid/query/timeseries/TimeseriesQueryEngine.java
index f420f78..e2437a3 100644
---
a/processing/src/main/java/org/apache/druid/query/timeseries/TimeseriesQueryEngine.java
+++
b/processing/src/main/java/org/apache/druid/query/timeseries/TimeseriesQueryEngine.java
@@ -20,8 +20,6 @@
package org.apache.druid.query.timeseries;
import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Supplier;
-import com.google.common.base.Suppliers;
import com.google.common.collect.Iterables;
import com.google.inject.Inject;
import org.apache.druid.collections.NonBlockingPool;
@@ -33,7 +31,7 @@ import
org.apache.druid.java.util.common.granularity.Granularity;
import org.apache.druid.java.util.common.guava.Sequence;
import org.apache.druid.java.util.common.guava.Sequences;
import org.apache.druid.java.util.common.io.Closer;
-import org.apache.druid.query.QueryConfig;
+import org.apache.druid.query.QueryContexts;
import org.apache.druid.query.QueryRunnerHelper;
import org.apache.druid.query.Result;
import org.apache.druid.query.aggregation.Aggregator;
@@ -59,7 +57,6 @@ import java.util.Objects;
*/
public class TimeseriesQueryEngine
{
- private final Supplier<QueryConfig> queryConfigSupplier;
private final NonBlockingPool<ByteBuffer> bufferPool;
/**
@@ -68,17 +65,14 @@ public class TimeseriesQueryEngine
@VisibleForTesting
public TimeseriesQueryEngine()
{
- this.queryConfigSupplier = Suppliers.ofInstance(new QueryConfig());
this.bufferPool = new StupidPool<>("dummy", () ->
ByteBuffer.allocate(1000000));
}
@Inject
public TimeseriesQueryEngine(
- final Supplier<QueryConfig> queryConfigSupplier,
final @Global NonBlockingPool<ByteBuffer> bufferPool
)
{
- this.queryConfigSupplier = queryConfigSupplier;
this.bufferPool = bufferPool;
}
@@ -94,13 +88,12 @@ public class TimeseriesQueryEngine
);
}
- final QueryConfig queryConfigToUse =
queryConfigSupplier.get().withOverrides(query);
final Filter filter = Filters.convertToCNFFromQueryContext(query,
Filters.toFilter(query.getFilter()));
final Interval interval = Iterables.getOnlyElement(query.getIntervals());
final Granularity gran = query.getGranularity();
final boolean descending = query.isDescending();
- final boolean doVectorize =
queryConfigToUse.getVectorize().shouldVectorize(
+ final boolean doVectorize =
QueryContexts.getVectorize(query).shouldVectorize(
adapter.canVectorize(filter, query.getVirtualColumns(), descending)
&& query.getAggregatorSpecs().stream().allMatch(aggregatorFactory ->
aggregatorFactory.canVectorize(adapter))
);
@@ -108,7 +101,7 @@ public class TimeseriesQueryEngine
final Sequence<Result<TimeseriesResultValue>> result;
if (doVectorize) {
- result = processVectorized(query, queryConfigToUse, adapter, filter,
interval, gran, descending);
+ result = processVectorized(query, adapter, filter, interval, gran,
descending);
} else {
result = processNonVectorized(query, adapter, filter, interval, gran,
descending);
}
@@ -123,7 +116,6 @@ public class TimeseriesQueryEngine
private Sequence<Result<TimeseriesResultValue>> processVectorized(
final TimeseriesQuery query,
- final QueryConfig queryConfig,
final StorageAdapter adapter,
@Nullable final Filter filter,
final Interval queryInterval,
@@ -139,7 +131,7 @@ public class TimeseriesQueryEngine
queryInterval,
query.getVirtualColumns(),
descending,
- queryConfig.getVectorSize(),
+ QueryContexts.getVectorSize(query),
null
);
diff --git
a/processing/src/test/java/org/apache/druid/query/DefaultQueryConfigTest.java
b/processing/src/test/java/org/apache/druid/query/DefaultQueryConfigTest.java
new file mode 100644
index 0000000..5f77874
--- /dev/null
+++
b/processing/src/test/java/org/apache/druid/query/DefaultQueryConfigTest.java
@@ -0,0 +1,82 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.druid.query;
+
+import com.google.common.collect.ImmutableList;
+import com.google.inject.Injector;
+import org.apache.druid.guice.GuiceInjectors;
+import org.apache.druid.guice.JsonConfigProvider;
+import org.apache.druid.guice.JsonConfigurator;
+import org.apache.druid.guice.annotations.Global;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.util.Properties;
+
+public class DefaultQueryConfigTest
+{
+ @Test
+ public void testSerdeContextMap()
+ {
+ final Injector injector = createInjector();
+ final String propertyPrefix = "druid.query.default";
+ final JsonConfigProvider<DefaultQueryConfig> provider =
JsonConfigProvider.of(
+ propertyPrefix,
+ DefaultQueryConfig.class
+ );
+ final Properties properties = new Properties();
+ properties.put(propertyPrefix + ".context.joinFilterRewriteMaxSize", "10");
+ properties.put(propertyPrefix + ".context.vectorize", "true");
+ provider.inject(properties, injector.getInstance(JsonConfigurator.class));
+ final DefaultQueryConfig defaultQueryConfig = provider.get().get();
+ Assert.assertNotNull(defaultQueryConfig.getContext());
+ Assert.assertEquals(2, defaultQueryConfig.getContext().size());
+ Assert.assertEquals("10",
defaultQueryConfig.getContext().get("joinFilterRewriteMaxSize"));
+ Assert.assertEquals("true",
defaultQueryConfig.getContext().get("vectorize"));
+ }
+
+ @Test
+ public void testSerdeEmptyContextMap()
+ {
+ final Injector injector = createInjector();
+ final String propertyPrefix = "druid.query.default";
+ final JsonConfigProvider<DefaultQueryConfig> provider =
JsonConfigProvider.of(
+ propertyPrefix,
+ DefaultQueryConfig.class
+ );
+ final Properties properties = new Properties();
+ provider.inject(properties, injector.getInstance(JsonConfigurator.class));
+ final DefaultQueryConfig defaultQueryConfig = provider.get().get();
+ Assert.assertNotNull(defaultQueryConfig.getContext());
+ Assert.assertEquals(0, defaultQueryConfig.getContext().size());
+ }
+
+ private Injector createInjector()
+ {
+ Injector injector = GuiceInjectors.makeStartupInjectorWithModules(
+ ImmutableList.of(
+ binder -> {
+ JsonConfigProvider.bind(binder, "druid.query.default",
DefaultQueryConfig.class, Global.class);
+ }
+ )
+ );
+ return injector;
+ }
+}
diff --git
a/processing/src/test/java/org/apache/druid/query/groupby/GroupByLimitPushDownInsufficientBufferTest.java
b/processing/src/test/java/org/apache/druid/query/groupby/GroupByLimitPushDownInsufficientBufferTest.java
index 53d2b97..af2b522 100644
---
a/processing/src/test/java/org/apache/druid/query/groupby/GroupByLimitPushDownInsufficientBufferTest.java
+++
b/processing/src/test/java/org/apache/druid/query/groupby/GroupByLimitPushDownInsufficientBufferTest.java
@@ -48,7 +48,6 @@ import org.apache.druid.query.BySegmentQueryRunner;
import org.apache.druid.query.DruidProcessingConfig;
import org.apache.druid.query.FinalizeResultsQueryRunner;
import org.apache.druid.query.Query;
-import org.apache.druid.query.QueryConfig;
import org.apache.druid.query.QueryPlus;
import org.apache.druid.query.QueryRunner;
import org.apache.druid.query.QueryRunnerFactory;
@@ -352,9 +351,6 @@ public class GroupByLimitPushDownInsufficientBufferTest
};
final Supplier<GroupByQueryConfig> configSupplier =
Suppliers.ofInstance(config);
- final Supplier<QueryConfig> queryConfigSupplier = Suppliers.ofInstance(
- new QueryConfig()
- );
final GroupByStrategySelector strategySelector = new
GroupByStrategySelector(
configSupplier,
new GroupByStrategyV1(
@@ -366,7 +362,6 @@ public class GroupByLimitPushDownInsufficientBufferTest
new GroupByStrategyV2(
druidProcessingConfig,
configSupplier,
- queryConfigSupplier,
bufferPool,
mergePool,
new ObjectMapper(new SmileFactory()),
@@ -385,7 +380,6 @@ public class GroupByLimitPushDownInsufficientBufferTest
new GroupByStrategyV2(
tooSmallDruidProcessingConfig,
configSupplier,
- queryConfigSupplier,
bufferPool,
tooSmallMergePool,
new ObjectMapper(new SmileFactory()),
diff --git
a/processing/src/test/java/org/apache/druid/query/groupby/GroupByLimitPushDownMultiNodeMergeTest.java
b/processing/src/test/java/org/apache/druid/query/groupby/GroupByLimitPushDownMultiNodeMergeTest.java
index 85f5071..3399377 100644
---
a/processing/src/test/java/org/apache/druid/query/groupby/GroupByLimitPushDownMultiNodeMergeTest.java
+++
b/processing/src/test/java/org/apache/druid/query/groupby/GroupByLimitPushDownMultiNodeMergeTest.java
@@ -48,7 +48,6 @@ import org.apache.druid.query.BySegmentQueryRunner;
import org.apache.druid.query.DruidProcessingConfig;
import org.apache.druid.query.FinalizeResultsQueryRunner;
import org.apache.druid.query.Query;
-import org.apache.druid.query.QueryConfig;
import org.apache.druid.query.QueryPlus;
import org.apache.druid.query.QueryRunner;
import org.apache.druid.query.QueryRunnerFactory;
@@ -384,9 +383,6 @@ public class GroupByLimitPushDownMultiNodeMergeTest
};
final Supplier<GroupByQueryConfig> configSupplier =
Suppliers.ofInstance(config);
- final Supplier<QueryConfig> queryConfigSupplier = Suppliers.ofInstance(
- new QueryConfig()
- );
final GroupByStrategySelector strategySelector = new
GroupByStrategySelector(
configSupplier,
new GroupByStrategyV1(
@@ -398,7 +394,6 @@ public class GroupByLimitPushDownMultiNodeMergeTest
new GroupByStrategyV2(
druidProcessingConfig,
configSupplier,
- queryConfigSupplier,
bufferPool,
mergePool,
new ObjectMapper(new SmileFactory()),
@@ -417,7 +412,6 @@ public class GroupByLimitPushDownMultiNodeMergeTest
new GroupByStrategyV2(
druidProcessingConfig,
configSupplier,
- queryConfigSupplier,
bufferPool,
mergePool2,
new ObjectMapper(new SmileFactory()),
diff --git
a/processing/src/test/java/org/apache/druid/query/groupby/GroupByMultiSegmentTest.java
b/processing/src/test/java/org/apache/druid/query/groupby/GroupByMultiSegmentTest.java
index 180f1af..b729e7d 100644
---
a/processing/src/test/java/org/apache/druid/query/groupby/GroupByMultiSegmentTest.java
+++
b/processing/src/test/java/org/apache/druid/query/groupby/GroupByMultiSegmentTest.java
@@ -45,7 +45,6 @@ import org.apache.druid.query.BySegmentQueryRunner;
import org.apache.druid.query.DruidProcessingConfig;
import org.apache.druid.query.FinalizeResultsQueryRunner;
import org.apache.druid.query.Query;
-import org.apache.druid.query.QueryConfig;
import org.apache.druid.query.QueryPlus;
import org.apache.druid.query.QueryRunner;
import org.apache.druid.query.QueryRunnerFactory;
@@ -275,7 +274,6 @@ public class GroupByMultiSegmentTest
new GroupByStrategyV2(
druidProcessingConfig,
configSupplier,
- Suppliers.ofInstance(new QueryConfig()),
bufferPool,
mergePool,
new ObjectMapper(new SmileFactory()),
diff --git a/processing/src/test/java/org/apache/druid/query/groupby/GroupByQueryMergeBufferTest.java b/processing/src/test/java/org/apache/druid/query/groupby/GroupByQueryMergeBufferTest.java
index fa18a0a..414a89f 100644
--- a/processing/src/test/java/org/apache/druid/query/groupby/GroupByQueryMergeBufferTest.java
+++ b/processing/src/test/java/org/apache/druid/query/groupby/GroupByQueryMergeBufferTest.java
@@ -31,7 +31,6 @@ import org.apache.druid.collections.ReferenceCountingResourceHolder;
import org.apache.druid.java.util.common.concurrent.Execs;
import org.apache.druid.java.util.common.granularity.Granularities;
import org.apache.druid.query.DruidProcessingConfig;
-import org.apache.druid.query.QueryConfig;
import org.apache.druid.query.QueryContexts;
import org.apache.druid.query.QueryDataSource;
import org.apache.druid.query.QueryRunner;
@@ -138,7 +137,6 @@ public class GroupByQueryMergeBufferTest extends InitializedNullHandlingTest
new GroupByStrategyV2(
PROCESSING_CONFIG,
configSupplier,
- Suppliers.ofInstance(new QueryConfig()),
BUFFER_POOL,
MERGE_BUFFER_POOL,
mapper,
diff --git a/processing/src/test/java/org/apache/druid/query/groupby/GroupByQueryRunnerFailureTest.java b/processing/src/test/java/org/apache/druid/query/groupby/GroupByQueryRunnerFailureTest.java
index 896016c..70806e6 100644
--- a/processing/src/test/java/org/apache/druid/query/groupby/GroupByQueryRunnerFailureTest.java
+++ b/processing/src/test/java/org/apache/druid/query/groupby/GroupByQueryRunnerFailureTest.java
@@ -31,7 +31,6 @@ import org.apache.druid.java.util.common.concurrent.Execs;
import org.apache.druid.java.util.common.granularity.Granularities;
import org.apache.druid.query.DruidProcessingConfig;
import org.apache.druid.query.InsufficientResourcesException;
-import org.apache.druid.query.QueryConfig;
import org.apache.druid.query.QueryContexts;
import org.apache.druid.query.QueryDataSource;
import org.apache.druid.query.QueryInterruptedException;
@@ -111,7 +110,6 @@ public class GroupByQueryRunnerFailureTest
new GroupByStrategyV2(
DEFAULT_PROCESSING_CONFIG,
configSupplier,
- Suppliers.ofInstance(new QueryConfig()),
BUFFER_POOL,
MERGE_BUFFER_POOL,
mapper,
diff --git a/processing/src/test/java/org/apache/druid/query/groupby/GroupByQueryRunnerTest.java b/processing/src/test/java/org/apache/druid/query/groupby/GroupByQueryRunnerTest.java
index 31c438f..e8ab616 100644
--- a/processing/src/test/java/org/apache/druid/query/groupby/GroupByQueryRunnerTest.java
+++ b/processing/src/test/java/org/apache/druid/query/groupby/GroupByQueryRunnerTest.java
@@ -53,7 +53,6 @@ import org.apache.druid.query.BySegmentResultValueClass;
import org.apache.druid.query.ChainedExecutionQueryRunner;
import org.apache.druid.query.DruidProcessingConfig;
import org.apache.druid.query.FinalizeResultsQueryRunner;
-import org.apache.druid.query.QueryConfig;
import org.apache.druid.query.QueryContexts;
import org.apache.druid.query.QueryDataSource;
import org.apache.druid.query.QueryPlus;
@@ -396,7 +395,6 @@ public class GroupByQueryRunnerTest extends InitializedNullHandlingTest
new GroupByStrategyV2(
processingConfig,
configSupplier,
- Suppliers.ofInstance(new QueryConfig()),
bufferPool,
mergeBufferPool,
mapper,
diff --git a/processing/src/test/java/org/apache/druid/query/groupby/NestedQueryPushDownTest.java b/processing/src/test/java/org/apache/druid/query/groupby/NestedQueryPushDownTest.java
index a294d57..fc5535f 100644
--- a/processing/src/test/java/org/apache/druid/query/groupby/NestedQueryPushDownTest.java
+++ b/processing/src/test/java/org/apache/druid/query/groupby/NestedQueryPushDownTest.java
@@ -49,7 +49,6 @@ import org.apache.druid.query.BySegmentQueryRunner;
import org.apache.druid.query.DruidProcessingConfig;
import org.apache.druid.query.FinalizeResultsQueryRunner;
import org.apache.druid.query.Query;
-import org.apache.druid.query.QueryConfig;
import org.apache.druid.query.QueryPlus;
import org.apache.druid.query.QueryRunner;
import org.apache.druid.query.QueryRunnerFactory;
@@ -312,9 +311,6 @@ public class NestedQueryPushDownTest
};
final Supplier<GroupByQueryConfig> configSupplier =
Suppliers.ofInstance(config);
- final Supplier<QueryConfig> queryConfigSupplier = Suppliers.ofInstance(
- new QueryConfig()
- );
final GroupByStrategySelector strategySelector = new
GroupByStrategySelector(
configSupplier,
new GroupByStrategyV1(
@@ -326,7 +322,6 @@ public class NestedQueryPushDownTest
new GroupByStrategyV2(
druidProcessingConfig,
configSupplier,
- queryConfigSupplier,
bufferPool,
mergePool,
new ObjectMapper(new SmileFactory()),
@@ -345,7 +340,6 @@ public class NestedQueryPushDownTest
new GroupByStrategyV2(
druidProcessingConfig,
configSupplier,
- queryConfigSupplier,
bufferPool,
mergePool2,
new ObjectMapper(new SmileFactory()),
diff --git a/processing/src/test/java/org/apache/druid/query/search/QueryConfigTest.java b/processing/src/test/java/org/apache/druid/query/search/QueryConfigTest.java
deleted file mode 100644
index 78a00ee..0000000
--- a/processing/src/test/java/org/apache/druid/query/search/QueryConfigTest.java
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.druid.query.search;
-
-import com.fasterxml.jackson.databind.ObjectMapper;
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.ImmutableMap;
-import org.apache.druid.query.Query;
-import org.apache.druid.query.QueryConfig;
-import org.apache.druid.query.QueryContexts;
-import org.apache.druid.query.QueryContexts.Vectorize;
-import org.apache.druid.query.TableDataSource;
-import org.apache.druid.query.TestQuery;
-import org.apache.druid.query.spec.MultipleIntervalSegmentSpec;
-import org.apache.druid.segment.QueryableIndexStorageAdapter;
-import org.apache.druid.segment.TestHelper;
-import org.junit.Assert;
-import org.junit.Test;
-
-import java.io.IOException;
-
-public class QueryConfigTest
-{
- @Test
- public void testSerde() throws IOException
- {
- final ObjectMapper mapper = TestHelper.makeJsonMapper();
- final String json = "{"
- + "\"vectorize\" : \"force\","
- + "\"vectorSize\" : 1"
- + "}";
- final QueryConfig config = mapper.readValue(json, QueryConfig.class);
- Assert.assertEquals(Vectorize.FORCE, config.getVectorize());
- Assert.assertEquals(1, config.getVectorSize());
- }
-
- @Test
- public void testDefault()
- {
- final QueryConfig config = new QueryConfig();
- Assert.assertEquals(QueryContexts.DEFAULT_VECTORIZE,
config.getVectorize());
- Assert.assertEquals(QueryableIndexStorageAdapter.DEFAULT_VECTOR_SIZE,
config.getVectorSize());
- }
-
- @Test
- public void testOverrides()
- {
- final Query<?> query = new TestQuery(
- new TableDataSource("datasource"),
- new MultipleIntervalSegmentSpec(ImmutableList.of()),
- false,
- ImmutableMap.of(
- QueryContexts.VECTORIZE_KEY,
- "true",
- QueryContexts.VECTOR_SIZE_KEY,
- QueryableIndexStorageAdapter.DEFAULT_VECTOR_SIZE * 2
- )
- );
- final QueryConfig config = new QueryConfig().withOverrides(query);
- Assert.assertEquals(Vectorize.TRUE, config.getVectorize());
- Assert.assertEquals(QueryableIndexStorageAdapter.DEFAULT_VECTOR_SIZE * 2,
config.getVectorSize());
- }
-}
diff --git a/server/src/main/java/org/apache/druid/guice/QueryToolChestModule.java b/server/src/main/java/org/apache/druid/guice/QueryToolChestModule.java
index 2fb685c..edb892b 100644
--- a/server/src/main/java/org/apache/druid/guice/QueryToolChestModule.java
+++ b/server/src/main/java/org/apache/druid/guice/QueryToolChestModule.java
@@ -25,10 +25,10 @@ import com.google.inject.Key;
import com.google.inject.Module;
import com.google.inject.multibindings.MapBinder;
import org.apache.druid.query.DefaultGenericQueryMetricsFactory;
+import org.apache.druid.query.DefaultQueryConfig;
import org.apache.druid.query.GenericQueryMetricsFactory;
import org.apache.druid.query.MapQueryToolChestWarehouse;
import org.apache.druid.query.Query;
-import org.apache.druid.query.QueryConfig;
import org.apache.druid.query.QueryToolChest;
import org.apache.druid.query.QueryToolChestWarehouse;
import org.apache.druid.query.datasourcemetadata.DataSourceMetadataQuery;
@@ -97,7 +97,7 @@ public class QueryToolChestModule implements Module
binder.bind(QueryToolChestWarehouse.class).to(MapQueryToolChestWarehouse.class);
- JsonConfigProvider.bind(binder, "druid.query", QueryConfig.class);
+ JsonConfigProvider.bind(binder, "druid.query.default",
DefaultQueryConfig.class);
JsonConfigProvider.bind(binder, "druid.query.groupBy",
GroupByQueryConfig.class);
JsonConfigProvider.bind(binder, "druid.query.search",
SearchQueryConfig.class);
JsonConfigProvider.bind(binder, "druid.query.topN", TopNQueryConfig.class);
diff --git a/server/src/main/java/org/apache/druid/server/QueryLifecycle.java b/server/src/main/java/org/apache/druid/server/QueryLifecycle.java
index ff04a58..cf6cde5 100644
--- a/server/src/main/java/org/apache/druid/server/QueryLifecycle.java
+++ b/server/src/main/java/org/apache/druid/server/QueryLifecycle.java
@@ -31,6 +31,8 @@ import org.apache.druid.java.util.common.guava.SequenceWrapper;
import org.apache.druid.java.util.common.guava.Sequences;
import org.apache.druid.java.util.common.logger.Logger;
import org.apache.druid.java.util.emitter.service.ServiceEmitter;
+import org.apache.druid.query.BaseQuery;
+import org.apache.druid.query.DefaultQueryConfig;
import org.apache.druid.query.DruidMetrics;
import org.apache.druid.query.GenericQueryMetricsFactory;
import org.apache.druid.query.Query;
@@ -77,6 +79,7 @@ public class QueryLifecycle
private final ServiceEmitter emitter;
private final RequestLogger requestLogger;
private final AuthorizerMapper authorizerMapper;
+ private final DefaultQueryConfig defaultQueryConfig;
private final long startMs;
private final long startNs;
@@ -92,6 +95,7 @@ public class QueryLifecycle
final ServiceEmitter emitter,
final RequestLogger requestLogger,
final AuthorizerMapper authorizerMapper,
+ final DefaultQueryConfig defaultQueryConfig,
final long startMs,
final long startNs
)
@@ -102,6 +106,7 @@ public class QueryLifecycle
this.emitter = emitter;
this.requestLogger = requestLogger;
this.authorizerMapper = authorizerMapper;
+ this.defaultQueryConfig = defaultQueryConfig;
this.startMs = startMs;
this.startNs = startNs;
}
@@ -170,7 +175,14 @@ public class QueryLifecycle
queryId = UUID.randomUUID().toString();
}
- this.baseQuery = baseQuery.withId(queryId);
+ Map<String, Object> mergedUserAndConfigContext;
+ if (baseQuery.getContext() != null) {
+ mergedUserAndConfigContext =
BaseQuery.computeOverriddenContext(defaultQueryConfig.getContext(),
baseQuery.getContext());
+ } else {
+ mergedUserAndConfigContext = defaultQueryConfig.getContext();
+ }
+
+ this.baseQuery =
baseQuery.withOverriddenContext(mergedUserAndConfigContext).withId(queryId);
this.toolChest = warehouse.getToolChest(baseQuery);
}
diff --git a/server/src/main/java/org/apache/druid/server/QueryLifecycleFactory.java b/server/src/main/java/org/apache/druid/server/QueryLifecycleFactory.java
index cae225c..106ffe9 100644
--- a/server/src/main/java/org/apache/druid/server/QueryLifecycleFactory.java
+++ b/server/src/main/java/org/apache/druid/server/QueryLifecycleFactory.java
@@ -19,9 +19,11 @@
package org.apache.druid.server;
+import com.google.common.base.Supplier;
import com.google.inject.Inject;
import org.apache.druid.guice.LazySingleton;
import org.apache.druid.java.util.emitter.service.ServiceEmitter;
+import org.apache.druid.query.DefaultQueryConfig;
import org.apache.druid.query.GenericQueryMetricsFactory;
import org.apache.druid.query.QuerySegmentWalker;
import org.apache.druid.query.QueryToolChestWarehouse;
@@ -38,6 +40,7 @@ public class QueryLifecycleFactory
private final ServiceEmitter emitter;
private final RequestLogger requestLogger;
private final AuthorizerMapper authorizerMapper;
+ private final DefaultQueryConfig defaultQueryConfig;
@Inject
public QueryLifecycleFactory(
@@ -47,7 +50,8 @@ public class QueryLifecycleFactory
final ServiceEmitter emitter,
final RequestLogger requestLogger,
final AuthConfig authConfig,
- final AuthorizerMapper authorizerMapper
+ final AuthorizerMapper authorizerMapper,
+ final Supplier<DefaultQueryConfig> queryConfigSupplier
)
{
this.warehouse = warehouse;
@@ -56,6 +60,7 @@ public class QueryLifecycleFactory
this.emitter = emitter;
this.requestLogger = requestLogger;
this.authorizerMapper = authorizerMapper;
+ this.defaultQueryConfig = queryConfigSupplier.get();
}
public QueryLifecycle factorize()
@@ -67,6 +72,7 @@ public class QueryLifecycleFactory
emitter,
requestLogger,
authorizerMapper,
+ defaultQueryConfig,
System.currentTimeMillis(),
System.nanoTime()
);
diff --git a/server/src/test/java/org/apache/druid/server/QueryResourceTest.java b/server/src/test/java/org/apache/druid/server/QueryResourceTest.java
index b9f57d5..0a24b0e 100644
--- a/server/src/test/java/org/apache/druid/server/QueryResourceTest.java
+++ b/server/src/test/java/org/apache/druid/server/QueryResourceTest.java
@@ -22,6 +22,7 @@ package org.apache.druid.server;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.jaxrs.smile.SmileMediaTypes;
+import com.google.common.base.Suppliers;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
@@ -34,6 +35,7 @@ import org.apache.druid.java.util.common.guava.Sequences;
import org.apache.druid.java.util.emitter.EmittingLogger;
import org.apache.druid.java.util.emitter.service.ServiceEmitter;
import org.apache.druid.query.DefaultGenericQueryMetricsFactory;
+import org.apache.druid.query.DefaultQueryConfig;
import org.apache.druid.query.MapQueryToolChestWarehouse;
import org.apache.druid.query.Query;
import org.apache.druid.query.QueryInterruptedException;
@@ -202,7 +204,8 @@ public class QueryResourceTest
new NoopServiceEmitter(),
testRequestLogger,
new AuthConfig(),
- AuthTestUtils.TEST_AUTHORIZER_MAPPER
+ AuthTestUtils.TEST_AUTHORIZER_MAPPER,
+ Suppliers.ofInstance(new DefaultQueryConfig(ImmutableMap.of()))
),
JSON_MAPPER,
JSON_MAPPER,
@@ -234,6 +237,111 @@ public class QueryResourceTest
}
@Test
+ public void testGoodQueryWithQueryConfigOverrideDefault() throws IOException
+ {
+ String overrideConfigKey = "priority";
+ String overrideConfigValue = "678";
+ DefaultQueryConfig overrideConfig = new
DefaultQueryConfig(ImmutableMap.of(overrideConfigKey, overrideConfigValue));
+ queryResource = new QueryResource(
+ new QueryLifecycleFactory(
+ WAREHOUSE,
+ TEST_SEGMENT_WALKER,
+ new DefaultGenericQueryMetricsFactory(),
+ new NoopServiceEmitter(),
+ testRequestLogger,
+ new AuthConfig(),
+ AuthTestUtils.TEST_AUTHORIZER_MAPPER,
+ Suppliers.ofInstance(overrideConfig)
+ ),
+ JSON_MAPPER,
+ JSON_MAPPER,
+ queryScheduler,
+ new AuthConfig(),
+ null,
+ ResponseContextConfig.newConfig(true),
+ DRUID_NODE
+ );
+
+ expectPermissiveHappyPathAuth();
+
+ Response response = queryResource.doPost(
+ new
ByteArrayInputStream(SIMPLE_TIMESERIES_QUERY.getBytes(StandardCharsets.UTF_8)),
+ null /*pretty*/,
+ testServletRequest
+ );
+ Assert.assertNotNull(response);
+
+ final ByteArrayOutputStream baos = new ByteArrayOutputStream();
+ ((StreamingOutput) response.getEntity()).write(baos);
+ final List<Result<TimeBoundaryResultValue>> responses =
JSON_MAPPER.readValue(
+ baos.toByteArray(),
+ new TypeReference<List<Result<TimeBoundaryResultValue>>>() {}
+ );
+
+ Assert.assertNotNull(response);
+ Assert.assertEquals(Response.Status.OK.getStatusCode(),
response.getStatus());
+ Assert.assertEquals(0, responses.size());
+ Assert.assertEquals(1, testRequestLogger.getNativeQuerylogs().size());
+
Assert.assertNotNull(testRequestLogger.getNativeQuerylogs().get(0).getQuery());
+
Assert.assertNotNull(testRequestLogger.getNativeQuerylogs().get(0).getQuery().getContext());
+
Assert.assertTrue(testRequestLogger.getNativeQuerylogs().get(0).getQuery().getContext().containsKey(overrideConfigKey));
+ Assert.assertEquals(overrideConfigValue,
testRequestLogger.getNativeQuerylogs().get(0).getQuery().getContext().get(overrideConfigKey));
+ }
+
+ @Test
+ public void testGoodQueryWithQueryConfigDoesNotOverrideQueryContext() throws
IOException
+ {
+ String overrideConfigKey = "priority";
+ String overrideConfigValue = "678";
+ DefaultQueryConfig overrideConfig = new
DefaultQueryConfig(ImmutableMap.of(overrideConfigKey, overrideConfigValue));
+ queryResource = new QueryResource(
+ new QueryLifecycleFactory(
+ WAREHOUSE,
+ TEST_SEGMENT_WALKER,
+ new DefaultGenericQueryMetricsFactory(),
+ new NoopServiceEmitter(),
+ testRequestLogger,
+ new AuthConfig(),
+ AuthTestUtils.TEST_AUTHORIZER_MAPPER,
+ Suppliers.ofInstance(overrideConfig)
+ ),
+ JSON_MAPPER,
+ JSON_MAPPER,
+ queryScheduler,
+ new AuthConfig(),
+ null,
+ ResponseContextConfig.newConfig(true),
+ DRUID_NODE
+ );
+
+ expectPermissiveHappyPathAuth();
+
+ Response response = queryResource.doPost(
+ // SIMPLE_TIMESERIES_QUERY_LOW_PRIORITY context has overrideConfigKey
with value of -1
+ new
ByteArrayInputStream(SIMPLE_TIMESERIES_QUERY_LOW_PRIORITY.getBytes(StandardCharsets.UTF_8)),
+ null /*pretty*/,
+ testServletRequest
+ );
+ Assert.assertNotNull(response);
+
+ final ByteArrayOutputStream baos = new ByteArrayOutputStream();
+ ((StreamingOutput) response.getEntity()).write(baos);
+ final List<Result<TimeBoundaryResultValue>> responses =
JSON_MAPPER.readValue(
+ baos.toByteArray(),
+ new TypeReference<List<Result<TimeBoundaryResultValue>>>() {}
+ );
+
+ Assert.assertNotNull(response);
+ Assert.assertEquals(Response.Status.OK.getStatusCode(),
response.getStatus());
+ Assert.assertEquals(0, responses.size());
+ Assert.assertEquals(1, testRequestLogger.getNativeQuerylogs().size());
+
Assert.assertNotNull(testRequestLogger.getNativeQuerylogs().get(0).getQuery());
+
Assert.assertNotNull(testRequestLogger.getNativeQuerylogs().get(0).getQuery().getContext());
+
Assert.assertTrue(testRequestLogger.getNativeQuerylogs().get(0).getQuery().getContext().containsKey(overrideConfigKey));
+ Assert.assertEquals(-1,
testRequestLogger.getNativeQuerylogs().get(0).getQuery().getContext().get(overrideConfigKey));
+ }
+
+ @Test
public void testTruncatedResponseContextShouldFail() throws IOException
{
expectPermissiveHappyPathAuth();
@@ -471,7 +579,8 @@ public class QueryResourceTest
new NoopServiceEmitter(),
testRequestLogger,
new AuthConfig(),
- authMapper
+ authMapper,
+ Suppliers.ofInstance(new DefaultQueryConfig(ImmutableMap.of()))
),
JSON_MAPPER,
JSON_MAPPER,
@@ -586,7 +695,8 @@ public class QueryResourceTest
new NoopServiceEmitter(),
testRequestLogger,
new AuthConfig(),
- authMapper
+ authMapper,
+ Suppliers.ofInstance(new DefaultQueryConfig(ImmutableMap.of()))
),
JSON_MAPPER,
JSON_MAPPER,
@@ -709,7 +819,8 @@ public class QueryResourceTest
new NoopServiceEmitter(),
testRequestLogger,
new AuthConfig(),
- authMapper
+ authMapper,
+ Suppliers.ofInstance(new DefaultQueryConfig(ImmutableMap.of()))
),
JSON_MAPPER,
JSON_MAPPER,
@@ -967,7 +1078,8 @@ public class QueryResourceTest
new NoopServiceEmitter(),
testRequestLogger,
new AuthConfig(),
- AuthTestUtils.TEST_AUTHORIZER_MAPPER
+ AuthTestUtils.TEST_AUTHORIZER_MAPPER,
+ Suppliers.ofInstance(new DefaultQueryConfig(ImmutableMap.of()))
),
JSON_MAPPER,
JSON_MAPPER,
diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/util/CalciteTests.java b/sql/src/test/java/org/apache/druid/sql/calcite/util/CalciteTests.java
index 6c6427e..0c186ed 100644
--- a/sql/src/test/java/org/apache/druid/sql/calcite/util/CalciteTests.java
+++ b/sql/src/test/java/org/apache/druid/sql/calcite/util/CalciteTests.java
@@ -20,6 +20,7 @@
package org.apache.druid.sql.calcite.util;
import com.fasterxml.jackson.databind.ObjectMapper;
+import com.google.common.base.Suppliers;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
@@ -59,6 +60,7 @@ import
org.apache.druid.java.util.http.client.response.HttpResponseHandler;
import org.apache.druid.math.expr.ExprMacroTable;
import org.apache.druid.query.DataSource;
import org.apache.druid.query.DefaultGenericQueryMetricsFactory;
+import org.apache.druid.query.DefaultQueryConfig;
import org.apache.druid.query.GlobalTableDataSource;
import org.apache.druid.query.InlineDataSource;
import org.apache.druid.query.Query;
@@ -705,7 +707,8 @@ public class CalciteTests
new ServiceEmitter("dummy", "dummy", new NoopEmitter()),
new NoopRequestLogger(),
new AuthConfig(),
- TEST_AUTHORIZER_MAPPER
+ TEST_AUTHORIZER_MAPPER,
+ Suppliers.ofInstance(new DefaultQueryConfig(ImmutableMap.of()))
);
}
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]