[
https://issues.apache.org/jira/browse/DRILL-5735?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=16597954#comment-16597954
]
ASF GitHub Bot commented on DRILL-5735:
---------------------------------------
sohami closed pull request #1279: DRILL-5735: UI options grouping and filtering
& Metrics hints
URL: https://github.com/apache/drill/pull/1279
This is a PR merged from a forked repository. Because GitHub hides the
original diff once a foreign (forked) pull request is merged, the diff is
reproduced below for the sake of provenance:
diff --git
a/exec/java-exec/src/main/java/org/apache/drill/exec/ExecConstants.java
b/exec/java-exec/src/main/java/org/apache/drill/exec/ExecConstants.java
index 3817971cce5..2b4bae15811 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/ExecConstants.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/ExecConstants.java
@@ -21,6 +21,7 @@
import org.apache.drill.exec.physical.impl.common.HashTable;
import org.apache.drill.exec.rpc.user.InboundImpersonationManager;
import org.apache.drill.exec.server.options.OptionValidator;
+import org.apache.drill.exec.server.options.OptionValidator.OptionDescription;
import org.apache.drill.exec.server.options.TypeValidators.IntegerValidator;
import org.apache.drill.exec.server.options.TypeValidators.BooleanValidator;
import org.apache.drill.exec.server.options.TypeValidators.DoubleValidator;
@@ -63,7 +64,7 @@ private ExecConstants() {
public static final String BIT_SERVER_RPC_THREADS =
"drill.exec.rpc.bit.server.threads";
public static final String USER_SERVER_RPC_THREADS =
"drill.exec.rpc.user.server.threads";
public static final String FRAG_RUNNER_RPC_TIMEOUT =
"drill.exec.rpc.fragrunner.timeout";
- public static final PositiveLongValidator FRAG_RUNNER_RPC_TIMEOUT_VALIDATOR
= new PositiveLongValidator(FRAG_RUNNER_RPC_TIMEOUT, Long.MAX_VALUE);
+ public static final PositiveLongValidator FRAG_RUNNER_RPC_TIMEOUT_VALIDATOR
= new PositiveLongValidator(FRAG_RUNNER_RPC_TIMEOUT, Long.MAX_VALUE, null);
public static final String TRACE_DUMP_DIRECTORY =
"drill.exec.trace.directory";
public static final String TRACE_DUMP_FILESYSTEM =
"drill.exec.trace.filesystem";
public static final String TEMP_DIRECTORIES = "drill.exec.tmp.directories";
@@ -85,11 +86,12 @@ private ExecConstants() {
public static final String OUTPUT_BATCH_SIZE =
"drill.exec.memory.operator.output_batch_size";
// Output Batch Size in Bytes. We have a small lower bound so we can test
with unit tests without the
// need to produce very large batches that take up lot of memory.
- public static final LongValidator OUTPUT_BATCH_SIZE_VALIDATOR = new
RangeLongValidator(OUTPUT_BATCH_SIZE, 128, 512 * 1024 * 1024);
+ public static final LongValidator OUTPUT_BATCH_SIZE_VALIDATOR = new
RangeLongValidator(OUTPUT_BATCH_SIZE, 128, 512 * 1024 * 1024,
+ new OptionDescription("Available as of Drill 1.13. Limits the amount of
memory that the Flatten, Merge Join, and External Sort operators allocate to
outgoing batches."));
// Based on available memory, adjust output batch size for buffered
operators by this factor.
public static final String OUTPUT_BATCH_SIZE_AVAIL_MEM_FACTOR =
"drill.exec.memory.operator.output_batch_size_avail_mem_factor";
- public static final DoubleValidator
OUTPUT_BATCH_SIZE_AVAIL_MEM_FACTOR_VALIDATOR = new
RangeDoubleValidator(OUTPUT_BATCH_SIZE_AVAIL_MEM_FACTOR, 0.01, 1.0);
+ public static final DoubleValidator
OUTPUT_BATCH_SIZE_AVAIL_MEM_FACTOR_VALIDATOR = new
RangeDoubleValidator(OUTPUT_BATCH_SIZE_AVAIL_MEM_FACTOR, 0.01, 1.0, null);
// External Sort Boot configuration
@@ -109,57 +111,57 @@ private ExecConstants() {
// External Sort Runtime options
- public static final BooleanValidator EXTERNAL_SORT_DISABLE_MANAGED_OPTION =
new BooleanValidator("exec.sort.disable_managed");
+ public static final BooleanValidator EXTERNAL_SORT_DISABLE_MANAGED_OPTION =
new BooleanValidator("exec.sort.disable_managed", null);
// Hash Join Options
public static final String HASHJOIN_HASHTABLE_CALC_TYPE_KEY =
"exec.hashjoin.hash_table_calc_type";
- public static final StringValidator HASHJOIN_HASHTABLE_CALC_TYPE = new
StringValidator(HASHJOIN_HASHTABLE_CALC_TYPE_KEY);
+ public static final StringValidator HASHJOIN_HASHTABLE_CALC_TYPE = new
StringValidator(HASHJOIN_HASHTABLE_CALC_TYPE_KEY, null);
public static final String HASHJOIN_SAFETY_FACTOR_KEY =
"exec.hashjoin.safety_factor";
- public static final DoubleValidator HASHJOIN_SAFETY_FACTOR = new
RangeDoubleValidator(HASHJOIN_SAFETY_FACTOR_KEY, 1.0, Double.MAX_VALUE);
+ public static final DoubleValidator HASHJOIN_SAFETY_FACTOR = new
RangeDoubleValidator(HASHJOIN_SAFETY_FACTOR_KEY, 1.0, Double.MAX_VALUE, null);
public static final String HASHJOIN_HASH_DOUBLE_FACTOR_KEY =
"exec.hashjoin.hash_double_factor";
- public static final DoubleValidator HASHJOIN_HASH_DOUBLE_FACTOR = new
RangeDoubleValidator(HASHJOIN_HASH_DOUBLE_FACTOR_KEY, 1.0, Double.MAX_VALUE);
+ public static final DoubleValidator HASHJOIN_HASH_DOUBLE_FACTOR = new
RangeDoubleValidator(HASHJOIN_HASH_DOUBLE_FACTOR_KEY, 1.0, Double.MAX_VALUE,
null);
public static final String HASHJOIN_FRAGMENTATION_FACTOR_KEY =
"exec.hashjoin.fragmentation_factor";
- public static final DoubleValidator HASHJOIN_FRAGMENTATION_FACTOR = new
RangeDoubleValidator(HASHJOIN_FRAGMENTATION_FACTOR_KEY, 1.0, Double.MAX_VALUE);
+ public static final DoubleValidator HASHJOIN_FRAGMENTATION_FACTOR = new
RangeDoubleValidator(HASHJOIN_FRAGMENTATION_FACTOR_KEY, 1.0, Double.MAX_VALUE,
null);
public static final String HASHJOIN_NUM_ROWS_IN_BATCH_KEY =
"exec.hashjoin.num_rows_in_batch";
- public static final LongValidator HASHJOIN_NUM_ROWS_IN_BATCH_VALIDATOR = new
RangeLongValidator(HASHJOIN_NUM_ROWS_IN_BATCH_KEY, 1, 65536);
+ public static final LongValidator HASHJOIN_NUM_ROWS_IN_BATCH_VALIDATOR = new
RangeLongValidator(HASHJOIN_NUM_ROWS_IN_BATCH_KEY, 1, 65536, null);
public static final String HASHJOIN_MAX_BATCHES_IN_MEMORY_KEY =
"exec.hashjoin.max_batches_in_memory";
- public static final LongValidator HASHJOIN_MAX_BATCHES_IN_MEMORY_VALIDATOR =
new RangeLongValidator(HASHJOIN_MAX_BATCHES_IN_MEMORY_KEY, 0, 65536);
+ public static final LongValidator HASHJOIN_MAX_BATCHES_IN_MEMORY_VALIDATOR =
new RangeLongValidator(HASHJOIN_MAX_BATCHES_IN_MEMORY_KEY, 0, 65536, null);
public static final String HASHJOIN_NUM_PARTITIONS_KEY =
"exec.hashjoin.num_partitions";
- public static final LongValidator HASHJOIN_NUM_PARTITIONS_VALIDATOR = new
RangeLongValidator(HASHJOIN_NUM_PARTITIONS_KEY, 1, 128); // 1 means - no
spilling
+ public static final LongValidator HASHJOIN_NUM_PARTITIONS_VALIDATOR = new
RangeLongValidator(HASHJOIN_NUM_PARTITIONS_KEY, 1, 128, null); // 1 means - no
spilling
public static final String HASHJOIN_MAX_MEMORY_KEY =
"drill.exec.hashjoin.mem_limit";
- public static final LongValidator HASHJOIN_MAX_MEMORY_VALIDATOR = new
RangeLongValidator(HASHJOIN_MAX_MEMORY_KEY, 0L, Long.MAX_VALUE);
+ public static final LongValidator HASHJOIN_MAX_MEMORY_VALIDATOR = new
RangeLongValidator(HASHJOIN_MAX_MEMORY_KEY, 0L, Long.MAX_VALUE, null);
public static final String HASHJOIN_SPILL_DIRS =
"drill.exec.hashjoin.spill.directories";
public static final String HASHJOIN_SPILL_FILESYSTEM =
"drill.exec.hashjoin.spill.fs";
public static final String HASHJOIN_FALLBACK_ENABLED_KEY =
"drill.exec.hashjoin.fallback.enabled";
- public static final BooleanValidator HASHJOIN_FALLBACK_ENABLED_VALIDATOR =
new BooleanValidator(HASHJOIN_FALLBACK_ENABLED_KEY);
+ public static final BooleanValidator HASHJOIN_FALLBACK_ENABLED_VALIDATOR =
new BooleanValidator(HASHJOIN_FALLBACK_ENABLED_KEY, null);
public static final String HASHJOIN_ENABLE_RUNTIME_FILTER_KEY =
"exec.hashjoin.enable.runtime_filter";
- public static final BooleanValidator HASHJOIN_ENABLE_RUNTIME_FILTER = new
BooleanValidator(HASHJOIN_ENABLE_RUNTIME_FILTER_KEY);
+ public static final BooleanValidator HASHJOIN_ENABLE_RUNTIME_FILTER = new
BooleanValidator(HASHJOIN_ENABLE_RUNTIME_FILTER_KEY, null);
public static final String HASHJOIN_BLOOM_FILTER_MAX_SIZE_KEY =
"exec.hashjoin.bloom_filter.max.size";
- public static final IntegerValidator HASHJOIN_BLOOM_FILTER_MAX_SIZE = new
IntegerValidator(HASHJOIN_BLOOM_FILTER_MAX_SIZE_KEY);
+ public static final IntegerValidator HASHJOIN_BLOOM_FILTER_MAX_SIZE = new
IntegerValidator(HASHJOIN_BLOOM_FILTER_MAX_SIZE_KEY, null);
public static final String HASHJOIN_BLOOM_FILTER_FPP_KEY =
"exec.hashjoin.bloom_filter.fpp";
- public static final DoubleValidator HASHJOIN_BLOOM_FILTER_FPP_VALIDATOR =
new RangeDoubleValidator(HASHJOIN_BLOOM_FILTER_FPP_KEY, Double.MIN_VALUE, 1.0);
+ public static final DoubleValidator HASHJOIN_BLOOM_FILTER_FPP_VALIDATOR =
new RangeDoubleValidator(HASHJOIN_BLOOM_FILTER_FPP_KEY, Double.MIN_VALUE, 1.0,
null);
// Hash Aggregate Options
public static final String HASHAGG_NUM_PARTITIONS_KEY =
"exec.hashagg.num_partitions";
- public static final LongValidator HASHAGG_NUM_PARTITIONS_VALIDATOR = new
RangeLongValidator(HASHAGG_NUM_PARTITIONS_KEY, 1, 128); // 1 means - no spilling
+ public static final LongValidator HASHAGG_NUM_PARTITIONS_VALIDATOR = new
RangeLongValidator(HASHAGG_NUM_PARTITIONS_KEY, 1, 128, null); // 1 means - no
spilling
public static final String HASHAGG_MAX_MEMORY_KEY = "exec.hashagg.mem_limit";
- public static final LongValidator HASHAGG_MAX_MEMORY_VALIDATOR = new
RangeLongValidator(HASHAGG_MAX_MEMORY_KEY, 0, Integer.MAX_VALUE);
+ public static final LongValidator HASHAGG_MAX_MEMORY_VALIDATOR = new
RangeLongValidator(HASHAGG_MAX_MEMORY_KEY, 0, Integer.MAX_VALUE, null);
// min batches is used for tuning (each partition needs so many batches when
planning the number of partitions,
// or reserve this number when calculating whether the remaining available
memory is too small and requires a spill.)
// Low value may OOM (e.g., when incoming rows become wider), higher values
use fewer partitions but are safer
public static final String HASHAGG_MIN_BATCHES_PER_PARTITION_KEY =
"exec.hashagg.min_batches_per_partition";
- public static final LongValidator
HASHAGG_MIN_BATCHES_PER_PARTITION_VALIDATOR = new
RangeLongValidator(HASHAGG_MIN_BATCHES_PER_PARTITION_KEY, 1, 5);
+ public static final LongValidator
HASHAGG_MIN_BATCHES_PER_PARTITION_VALIDATOR = new
RangeLongValidator(HASHAGG_MIN_BATCHES_PER_PARTITION_KEY, 1, 5, null);
// Can be turned off mainly for testing. Memory prediction is used to decide
on when to spill to disk; with this option off,
// spill would be triggered only by another mechanism -- "catch OOMs and
then spill".
public static final String HASHAGG_USE_MEMORY_PREDICTION_KEY =
"exec.hashagg.use_memory_prediction";
- public static final BooleanValidator HASHAGG_USE_MEMORY_PREDICTION_VALIDATOR
= new BooleanValidator(HASHAGG_USE_MEMORY_PREDICTION_KEY);
+ public static final BooleanValidator HASHAGG_USE_MEMORY_PREDICTION_VALIDATOR
= new BooleanValidator(HASHAGG_USE_MEMORY_PREDICTION_KEY, null);
public static final String HASHAGG_SPILL_DIRS =
"drill.exec.hashagg.spill.directories";
public static final String HASHAGG_SPILL_FILESYSTEM =
"drill.exec.hashagg.spill.fs";
public static final String HASHAGG_FALLBACK_ENABLED_KEY =
"drill.exec.hashagg.fallback.enabled";
- public static final BooleanValidator HASHAGG_FALLBACK_ENABLED_VALIDATOR =
new BooleanValidator(HASHAGG_FALLBACK_ENABLED_KEY);
+ public static final BooleanValidator HASHAGG_FALLBACK_ENABLED_VALIDATOR =
new BooleanValidator(HASHAGG_FALLBACK_ENABLED_KEY, null);
public static final String SSL_PROVIDER = "drill.exec.ssl.provider"; //
valid values are "JDK", "OPENSSL" // default JDK
public static final String SSL_PROTOCOL = "drill.exec.ssl.protocol"; //
valid values are SSL, SSLV2, SSLV3, TLS, TLSV1, TLSv1.1, TLSv1.2(default)
@@ -264,102 +266,116 @@ private ExecConstants() {
public static final String DEFAULT_TEMPORARY_WORKSPACE =
"drill.exec.default_temporary_workspace";
public static final String OUTPUT_FORMAT_OPTION = "store.format";
- public static final OptionValidator OUTPUT_FORMAT_VALIDATOR = new
StringValidator(OUTPUT_FORMAT_OPTION);
- public static final String PARQUET_BLOCK_SIZE = "store.parquet.block-size";
+ public static final OptionValidator OUTPUT_FORMAT_VALIDATOR = new
StringValidator(OUTPUT_FORMAT_OPTION,
+ new OptionDescription("Output format for data written to tables with the
CREATE TABLE AS (CTAS) command. Allowed values are parquet, json, psv, csv, or
tsv."));
public static final String PARQUET_WRITER_USE_SINGLE_FS_BLOCK =
"store.parquet.writer.use_single_fs_block";
public static final OptionValidator
PARQUET_WRITER_USE_SINGLE_FS_BLOCK_VALIDATOR = new BooleanValidator(
- PARQUET_WRITER_USE_SINGLE_FS_BLOCK);
- public static final OptionValidator PARQUET_BLOCK_SIZE_VALIDATOR = new
PositiveLongValidator(PARQUET_BLOCK_SIZE, Integer.MAX_VALUE);
+ PARQUET_WRITER_USE_SINGLE_FS_BLOCK, null);
+ public static final String PARQUET_BLOCK_SIZE = "store.parquet.block-size";
+ public static final OptionValidator PARQUET_BLOCK_SIZE_VALIDATOR = new
PositiveLongValidator(PARQUET_BLOCK_SIZE, Integer.MAX_VALUE,
+ new OptionDescription("Sets the size of a Parquet row group to the
number of bytes less than or equal to the block size of MFS, HDFS, or the file
system."));
public static final String PARQUET_PAGE_SIZE = "store.parquet.page-size";
- public static final OptionValidator PARQUET_PAGE_SIZE_VALIDATOR = new
PositiveLongValidator(PARQUET_PAGE_SIZE, Integer.MAX_VALUE);
+ public static final OptionValidator PARQUET_PAGE_SIZE_VALIDATOR = new
PositiveLongValidator(PARQUET_PAGE_SIZE, Integer.MAX_VALUE, null);
public static final String PARQUET_DICT_PAGE_SIZE =
"store.parquet.dictionary.page-size";
- public static final OptionValidator PARQUET_DICT_PAGE_SIZE_VALIDATOR = new
PositiveLongValidator(PARQUET_DICT_PAGE_SIZE, Integer.MAX_VALUE);
+ public static final OptionValidator PARQUET_DICT_PAGE_SIZE_VALIDATOR = new
PositiveLongValidator(PARQUET_DICT_PAGE_SIZE, Integer.MAX_VALUE,
+ new OptionDescription("For internal use. Do not change."));
public static final String PARQUET_WRITER_COMPRESSION_TYPE =
"store.parquet.compression";
public static final OptionValidator
PARQUET_WRITER_COMPRESSION_TYPE_VALIDATOR = new EnumeratedStringValidator(
- PARQUET_WRITER_COMPRESSION_TYPE, "snappy", "gzip", "none");
+ PARQUET_WRITER_COMPRESSION_TYPE, new OptionDescription("Compression type
for storing Parquet output. Allowed values: snappy, gzip, none"), "snappy",
"gzip", "none");
public static final String PARQUET_WRITER_ENABLE_DICTIONARY_ENCODING =
"store.parquet.enable_dictionary_encoding";
public static final OptionValidator
PARQUET_WRITER_ENABLE_DICTIONARY_ENCODING_VALIDATOR = new BooleanValidator(
- PARQUET_WRITER_ENABLE_DICTIONARY_ENCODING);
+ PARQUET_WRITER_ENABLE_DICTIONARY_ENCODING,
+ new OptionDescription("For internal use. Do not change."));
public static final String PARQUET_WRITER_USE_PRIMITIVE_TYPES_FOR_DECIMALS
= "store.parquet.writer.use_primitive_types_for_decimals";
public static final OptionValidator
PARQUET_WRITER_USE_PRIMITIVE_TYPES_FOR_DECIMALS_VALIDATOR = new
BooleanValidator(
- PARQUET_WRITER_USE_PRIMITIVE_TYPES_FOR_DECIMALS);
+ PARQUET_WRITER_USE_PRIMITIVE_TYPES_FOR_DECIMALS, null);
public static final String PARQUET_WRITER_LOGICAL_TYPE_FOR_DECIMALS
= "store.parquet.writer.logical_type_for_decimals";
public static final OptionValidator
PARQUET_WRITER_LOGICAL_TYPE_FOR_DECIMALS_VALIDATOR
- = new
EnumeratedStringValidator(PARQUET_WRITER_LOGICAL_TYPE_FOR_DECIMALS,
"fixed_len_byte_array", "binary");
+ = new
EnumeratedStringValidator(PARQUET_WRITER_LOGICAL_TYPE_FOR_DECIMALS, null,
"fixed_len_byte_array", "binary");
public static final String PARQUET_VECTOR_FILL_THRESHOLD =
"store.parquet.vector_fill_threshold";
- public static final OptionValidator PARQUET_VECTOR_FILL_THRESHOLD_VALIDATOR
= new PositiveLongValidator(PARQUET_VECTOR_FILL_THRESHOLD, 99l);
+ public static final OptionValidator PARQUET_VECTOR_FILL_THRESHOLD_VALIDATOR
= new PositiveLongValidator(PARQUET_VECTOR_FILL_THRESHOLD, 99L, null);
public static final String PARQUET_VECTOR_FILL_CHECK_THRESHOLD =
"store.parquet.vector_fill_check_threshold";
- public static final OptionValidator
PARQUET_VECTOR_FILL_CHECK_THRESHOLD_VALIDATOR = new
PositiveLongValidator(PARQUET_VECTOR_FILL_CHECK_THRESHOLD, 100l);
+ public static final OptionValidator
PARQUET_VECTOR_FILL_CHECK_THRESHOLD_VALIDATOR = new
PositiveLongValidator(PARQUET_VECTOR_FILL_CHECK_THRESHOLD, 100L, null);
public static final String PARQUET_NEW_RECORD_READER =
"store.parquet.use_new_reader";
- public static final OptionValidator
PARQUET_RECORD_READER_IMPLEMENTATION_VALIDATOR = new
BooleanValidator(PARQUET_NEW_RECORD_READER);
+ public static final OptionValidator
PARQUET_RECORD_READER_IMPLEMENTATION_VALIDATOR = new
BooleanValidator(PARQUET_NEW_RECORD_READER,
+ new OptionDescription("Not supported in this release."));
public static final String PARQUET_READER_INT96_AS_TIMESTAMP =
"store.parquet.reader.int96_as_timestamp";
- public static final OptionValidator
PARQUET_READER_INT96_AS_TIMESTAMP_VALIDATOR = new
BooleanValidator(PARQUET_READER_INT96_AS_TIMESTAMP);
+ public static final OptionValidator
PARQUET_READER_INT96_AS_TIMESTAMP_VALIDATOR = new
BooleanValidator(PARQUET_READER_INT96_AS_TIMESTAMP,
+ new OptionDescription("Enables Drill to implicitly interpret the INT96
timestamp data type in Parquet files."));
public static final String PARQUET_PAGEREADER_ASYNC =
"store.parquet.reader.pagereader.async";
- public static final OptionValidator PARQUET_PAGEREADER_ASYNC_VALIDATOR = new
BooleanValidator(PARQUET_PAGEREADER_ASYNC);
+ public static final OptionValidator PARQUET_PAGEREADER_ASYNC_VALIDATOR = new
BooleanValidator(PARQUET_PAGEREADER_ASYNC,
+ new OptionDescription("Enable the asynchronous page reader. This
pipelines the reading of data from disk for high performance."));
// Number of pages the Async Parquet page reader will read before blocking
public static final String PARQUET_PAGEREADER_QUEUE_SIZE =
"store.parquet.reader.pagereader.queuesize";
- public static final OptionValidator PARQUET_PAGEREADER_QUEUE_SIZE_VALIDATOR
= new PositiveLongValidator(PARQUET_PAGEREADER_QUEUE_SIZE, Integer.MAX_VALUE);
+ public static final OptionValidator PARQUET_PAGEREADER_QUEUE_SIZE_VALIDATOR
= new PositiveLongValidator(PARQUET_PAGEREADER_QUEUE_SIZE, Integer.MAX_VALUE,
null);
public static final String PARQUET_PAGEREADER_ENFORCETOTALSIZE =
"store.parquet.reader.pagereader.enforceTotalSize";
- public static final OptionValidator
PARQUET_PAGEREADER_ENFORCETOTALSIZE_VALIDATOR = new
BooleanValidator(PARQUET_PAGEREADER_ENFORCETOTALSIZE);
+ public static final OptionValidator
PARQUET_PAGEREADER_ENFORCETOTALSIZE_VALIDATOR = new
BooleanValidator(PARQUET_PAGEREADER_ENFORCETOTALSIZE, null);
public static final String PARQUET_COLUMNREADER_ASYNC =
"store.parquet.reader.columnreader.async";
- public static final OptionValidator PARQUET_COLUMNREADER_ASYNC_VALIDATOR =
new BooleanValidator(PARQUET_COLUMNREADER_ASYNC);
+ public static final OptionValidator PARQUET_COLUMNREADER_ASYNC_VALIDATOR =
new BooleanValidator(PARQUET_COLUMNREADER_ASYNC,
+ new OptionDescription("Turn on parallel decoding of column data from
Parquet to the in memory format. This increases CPU usage and is most useful
for compressed fixed width data. With increasing concurrency, this option may
cause queries to run slower and should be turned on only for performance
critical queries."));
// Use a buffering reader for Parquet page reader
public static final String PARQUET_PAGEREADER_USE_BUFFERED_READ =
"store.parquet.reader.pagereader.bufferedread";
- public static final OptionValidator
PARQUET_PAGEREADER_USE_BUFFERED_READ_VALIDATOR = new
BooleanValidator(PARQUET_PAGEREADER_USE_BUFFERED_READ);
+ public static final OptionValidator
PARQUET_PAGEREADER_USE_BUFFERED_READ_VALIDATOR = new
BooleanValidator(PARQUET_PAGEREADER_USE_BUFFERED_READ,
+ new OptionDescription("Enable buffered page reading. Can improve disk
scan speeds by buffering data, but increases memory usage. This option is less
useful when the number of columns increases."));
// Size in MiB of the buffer the Parquet page reader will use to read from
disk. Default is 1 MiB
public static final String PARQUET_PAGEREADER_BUFFER_SIZE =
"store.parquet.reader.pagereader.buffersize";
- public static final OptionValidator PARQUET_PAGEREADER_BUFFER_SIZE_VALIDATOR
= new LongValidator(PARQUET_PAGEREADER_BUFFER_SIZE);
+ public static final OptionValidator PARQUET_PAGEREADER_BUFFER_SIZE_VALIDATOR
= new LongValidator(PARQUET_PAGEREADER_BUFFER_SIZE,
+ new OptionDescription("The size of the buffer (in bytes) to use if
bufferedread is true. Has no effect otherwise."));
// try to use fadvise if available
public static final String PARQUET_PAGEREADER_USE_FADVISE =
"store.parquet.reader.pagereader.usefadvise";
- public static final OptionValidator PARQUET_PAGEREADER_USE_FADVISE_VALIDATOR
= new BooleanValidator(PARQUET_PAGEREADER_USE_FADVISE);
+ public static final OptionValidator PARQUET_PAGEREADER_USE_FADVISE_VALIDATOR
= new BooleanValidator(PARQUET_PAGEREADER_USE_FADVISE,
+ new OptionDescription("If the file system supports it, the Parquet file
reader issues an fadvise call to enable file server side sequential reading and
caching. Since many HDFS implementations do not support this and because this
may have no effect in conditions of high concurrency, the option is set to
false. Useful for benchmarks and for performance critical queries."));
- public static final OptionValidator COMPILE_SCALAR_REPLACEMENT = new
BooleanValidator("exec.compile.scalar_replacement");
+ public static final OptionValidator COMPILE_SCALAR_REPLACEMENT = new
BooleanValidator("exec.compile.scalar_replacement", null);
// Controls whether to enable bulk parquet reader processing
public static final String PARQUET_FLAT_READER_BULK =
"store.parquet.flat.reader.bulk";
- public static final OptionValidator PARQUET_FLAT_READER_BULK_VALIDATOR = new
BooleanValidator(PARQUET_FLAT_READER_BULK);
+ public static final OptionValidator PARQUET_FLAT_READER_BULK_VALIDATOR = new
BooleanValidator(PARQUET_FLAT_READER_BULK, null);
// Controls the flat parquet reader batching constraints (number of record
and memory limit)
public static final String PARQUET_FLAT_BATCH_NUM_RECORDS =
"store.parquet.flat.batch.num_records";
- public static final OptionValidator PARQUET_FLAT_BATCH_NUM_RECORDS_VALIDATOR
= new RangeLongValidator(PARQUET_FLAT_BATCH_NUM_RECORDS, 1,
ValueVector.MAX_ROW_COUNT);
+ public static final OptionValidator PARQUET_FLAT_BATCH_NUM_RECORDS_VALIDATOR
= new RangeLongValidator(PARQUET_FLAT_BATCH_NUM_RECORDS, 1,
ValueVector.MAX_ROW_COUNT, null);
public static final String PARQUET_FLAT_BATCH_MEMORY_SIZE =
"store.parquet.flat.batch.memory_size";
// This configuration is used to overwrite the common memory batch sizing
configuration property
- public static final OptionValidator PARQUET_FLAT_BATCH_MEMORY_SIZE_VALIDATOR
= new RangeLongValidator(PARQUET_FLAT_BATCH_MEMORY_SIZE, 0, Integer.MAX_VALUE);
+ public static final OptionValidator PARQUET_FLAT_BATCH_MEMORY_SIZE_VALIDATOR
= new RangeLongValidator(PARQUET_FLAT_BATCH_MEMORY_SIZE, 0, Integer.MAX_VALUE,
null);
public static final String JSON_ALL_TEXT_MODE = "store.json.all_text_mode";
- public static final BooleanValidator JSON_READER_ALL_TEXT_MODE_VALIDATOR =
new BooleanValidator(JSON_ALL_TEXT_MODE);
- public static final BooleanValidator JSON_EXTENDED_TYPES = new
BooleanValidator("store.json.extended_types");
- public static final BooleanValidator JSON_WRITER_UGLIFY = new
BooleanValidator("store.json.writer.uglify");
- public static final BooleanValidator JSON_WRITER_SKIPNULLFIELDS = new
BooleanValidator("store.json.writer.skip_null_fields");
+ public static final BooleanValidator JSON_READER_ALL_TEXT_MODE_VALIDATOR =
new BooleanValidator(JSON_ALL_TEXT_MODE,
+ new OptionDescription("Drill reads all data from the JSON files as
VARCHAR. Prevents schema change errors."));
+ public static final BooleanValidator JSON_EXTENDED_TYPES = new
BooleanValidator("store.json.extended_types",
+ new OptionDescription("Turns on special JSON structures that Drill
serializes for storing more type information than the four basic JSON types."));
+ public static final BooleanValidator JSON_WRITER_UGLIFY = new
BooleanValidator("store.json.writer.uglify", null);
+ public static final BooleanValidator JSON_WRITER_SKIPNULLFIELDS = new
BooleanValidator("store.json.writer.skip_null_fields", null);
public static final String JSON_READER_SKIP_INVALID_RECORDS_FLAG =
"store.json.reader.skip_invalid_records";
- public static final BooleanValidator JSON_SKIP_MALFORMED_RECORDS_VALIDATOR =
new BooleanValidator(JSON_READER_SKIP_INVALID_RECORDS_FLAG);
+ public static final BooleanValidator JSON_SKIP_MALFORMED_RECORDS_VALIDATOR =
new BooleanValidator(JSON_READER_SKIP_INVALID_RECORDS_FLAG, null);
public static final String JSON_READER_PRINT_INVALID_RECORDS_LINE_NOS_FLAG =
"store.json.reader.print_skipped_invalid_record_number";
- public static final BooleanValidator
JSON_READER_PRINT_INVALID_RECORDS_LINE_NOS_FLAG_VALIDATOR = new
BooleanValidator(JSON_READER_PRINT_INVALID_RECORDS_LINE_NOS_FLAG);
- public static final DoubleValidator TEXT_ESTIMATED_ROW_SIZE = new
RangeDoubleValidator("store.text.estimated_row_size_bytes", 1, Long.MAX_VALUE);
+ public static final BooleanValidator
JSON_READER_PRINT_INVALID_RECORDS_LINE_NOS_FLAG_VALIDATOR = new
BooleanValidator(JSON_READER_PRINT_INVALID_RECORDS_LINE_NOS_FLAG, null);
+ public static final DoubleValidator TEXT_ESTIMATED_ROW_SIZE = new
RangeDoubleValidator("store.text.estimated_row_size_bytes", 1, Long.MAX_VALUE,
+ new OptionDescription("Estimate of the row size in a delimited text
file, such as csv. The closer to actual, the better the query plan. Used for
all csv files in the system/session where the value is set. Impacts the
decision to plan a broadcast join or not."));
/**
* Json writer option for writing `NaN` and `Infinity` tokens as numbers
(not enclosed with double quotes)
*/
public static final String JSON_WRITER_NAN_INF_NUMBERS =
"store.json.writer.allow_nan_inf";
- public static final BooleanValidator JSON_WRITER_NAN_INF_NUMBERS_VALIDATOR =
new BooleanValidator(JSON_WRITER_NAN_INF_NUMBERS);
+ public static final BooleanValidator JSON_WRITER_NAN_INF_NUMBERS_VALIDATOR =
new BooleanValidator(JSON_WRITER_NAN_INF_NUMBERS, null);
/**
* Json reader option that enables parser to read `NaN` and `Infinity`
tokens as numbers
*/
public static final String JSON_READER_NAN_INF_NUMBERS =
"store.json.reader.allow_nan_inf";
- public static final BooleanValidator JSON_READER_NAN_INF_NUMBERS_VALIDATOR =
new BooleanValidator(JSON_READER_NAN_INF_NUMBERS);
+ public static final BooleanValidator JSON_READER_NAN_INF_NUMBERS_VALIDATOR =
new BooleanValidator(JSON_READER_NAN_INF_NUMBERS, null);
/**
* The column label (for directory levels) in results when querying files in
a directory
* E.g. labels: dir0 dir1<pre>
@@ -368,44 +384,54 @@ private ExecConstants() {
* |- baz - b.parquet</pre>
*/
public static final String FILESYSTEM_PARTITION_COLUMN_LABEL =
"drill.exec.storage.file.partition.column.label";
- public static final StringValidator
FILESYSTEM_PARTITION_COLUMN_LABEL_VALIDATOR = new
StringValidator(FILESYSTEM_PARTITION_COLUMN_LABEL);
+ public static final StringValidator
FILESYSTEM_PARTITION_COLUMN_LABEL_VALIDATOR = new
StringValidator(FILESYSTEM_PARTITION_COLUMN_LABEL,
+ new OptionDescription("The column label for directory levels in results
of queries of files in a directory. Accepts a string input."));
/**
* Implicit file columns
*/
public static final String IMPLICIT_FILENAME_COLUMN_LABEL =
"drill.exec.storage.implicit.filename.column.label";
- public static final OptionValidator IMPLICIT_FILENAME_COLUMN_LABEL_VALIDATOR
= new StringValidator(IMPLICIT_FILENAME_COLUMN_LABEL);
+ public static final OptionValidator IMPLICIT_FILENAME_COLUMN_LABEL_VALIDATOR
= new StringValidator(IMPLICIT_FILENAME_COLUMN_LABEL,
+ new OptionDescription("Available as of Drill 1.10. Sets the implicit
column name for the filename column."));
public static final String IMPLICIT_SUFFIX_COLUMN_LABEL =
"drill.exec.storage.implicit.suffix.column.label";
- public static final OptionValidator IMPLICIT_SUFFIX_COLUMN_LABEL_VALIDATOR =
new StringValidator(IMPLICIT_SUFFIX_COLUMN_LABEL);
+ public static final OptionValidator IMPLICIT_SUFFIX_COLUMN_LABEL_VALIDATOR =
new StringValidator(IMPLICIT_SUFFIX_COLUMN_LABEL,
+ new OptionDescription("Available as of Drill 1.10. Sets the implicit
column name for the suffix column."));
public static final String IMPLICIT_FQN_COLUMN_LABEL =
"drill.exec.storage.implicit.fqn.column.label";
- public static final OptionValidator IMPLICIT_FQN_COLUMN_LABEL_VALIDATOR =
new StringValidator(IMPLICIT_FQN_COLUMN_LABEL);
+ public static final OptionValidator IMPLICIT_FQN_COLUMN_LABEL_VALIDATOR =
new StringValidator(IMPLICIT_FQN_COLUMN_LABEL,
+ new OptionDescription("Available as of Drill 1.10. Sets the implicit
column name for the fqn column."));
public static final String IMPLICIT_FILEPATH_COLUMN_LABEL =
"drill.exec.storage.implicit.filepath.column.label";
- public static final OptionValidator IMPLICIT_FILEPATH_COLUMN_LABEL_VALIDATOR
= new StringValidator(IMPLICIT_FILEPATH_COLUMN_LABEL);
+ public static final OptionValidator IMPLICIT_FILEPATH_COLUMN_LABEL_VALIDATOR
= new StringValidator(IMPLICIT_FILEPATH_COLUMN_LABEL,
+ new OptionDescription("Available as of Drill 1.10. Sets the implicit
column name for the filepath column."));
public static final String JSON_READ_NUMBERS_AS_DOUBLE =
"store.json.read_numbers_as_double";
- public static final BooleanValidator JSON_READ_NUMBERS_AS_DOUBLE_VALIDATOR =
new BooleanValidator(JSON_READ_NUMBERS_AS_DOUBLE);
+ public static final BooleanValidator JSON_READ_NUMBERS_AS_DOUBLE_VALIDATOR =
new BooleanValidator(JSON_READ_NUMBERS_AS_DOUBLE,
+ new OptionDescription("Reads numbers with or without a decimal point as
DOUBLE. Prevents schema change errors."));
public static final String MONGO_ALL_TEXT_MODE = "store.mongo.all_text_mode";
- public static final OptionValidator MONGO_READER_ALL_TEXT_MODE_VALIDATOR =
new BooleanValidator(MONGO_ALL_TEXT_MODE);
+ public static final OptionValidator MONGO_READER_ALL_TEXT_MODE_VALIDATOR =
new BooleanValidator(MONGO_ALL_TEXT_MODE,
+ new OptionDescription("Similar to store.json.all_text_mode for
MongoDB."));
public static final String MONGO_READER_READ_NUMBERS_AS_DOUBLE =
"store.mongo.read_numbers_as_double";
- public static final OptionValidator
MONGO_READER_READ_NUMBERS_AS_DOUBLE_VALIDATOR = new
BooleanValidator(MONGO_READER_READ_NUMBERS_AS_DOUBLE);
+ public static final OptionValidator
MONGO_READER_READ_NUMBERS_AS_DOUBLE_VALIDATOR = new
BooleanValidator(MONGO_READER_READ_NUMBERS_AS_DOUBLE,
+ new OptionDescription("Similar to store.json.read_numbers_as_double."));
public static final String MONGO_BSON_RECORD_READER =
"store.mongo.bson.record.reader";
- public static final OptionValidator MONGO_BSON_RECORD_READER_VALIDATOR = new
BooleanValidator(MONGO_BSON_RECORD_READER);
+ public static final OptionValidator MONGO_BSON_RECORD_READER_VALIDATOR = new
BooleanValidator(MONGO_BSON_RECORD_READER, null);
public static final String ENABLE_UNION_TYPE_KEY = "exec.enable_union_type";
- public static final BooleanValidator ENABLE_UNION_TYPE = new
BooleanValidator(ENABLE_UNION_TYPE_KEY);
+ public static final BooleanValidator ENABLE_UNION_TYPE = new
BooleanValidator(ENABLE_UNION_TYPE_KEY,
+ new OptionDescription("Enable support for Avro union type."));
// Kafka plugin related options.
public static final String KAFKA_ALL_TEXT_MODE = "store.kafka.all_text_mode";
- public static final OptionValidator KAFKA_READER_ALL_TEXT_MODE_VALIDATOR =
new BooleanValidator(KAFKA_ALL_TEXT_MODE);
+ public static final OptionValidator KAFKA_READER_ALL_TEXT_MODE_VALIDATOR =
new BooleanValidator(KAFKA_ALL_TEXT_MODE,
+ new OptionDescription("Similar to store.json.all_text_mode for Kafka."));
public static final String KAFKA_READER_READ_NUMBERS_AS_DOUBLE =
"store.kafka.read_numbers_as_double";
public static final OptionValidator
KAFKA_READER_READ_NUMBERS_AS_DOUBLE_VALIDATOR = new BooleanValidator(
- KAFKA_READER_READ_NUMBERS_AS_DOUBLE);
+ KAFKA_READER_READ_NUMBERS_AS_DOUBLE, new OptionDescription("Similar to
store.json.read_numbers_as_double."));
public static final String KAFKA_RECORD_READER = "store.kafka.record.reader";
- public static final OptionValidator KAFKA_RECORD_READER_VALIDATOR = new
StringValidator(KAFKA_RECORD_READER);
+ public static final OptionValidator KAFKA_RECORD_READER_VALIDATOR = new
StringValidator(KAFKA_RECORD_READER, null);
public static final String KAFKA_POLL_TIMEOUT = "store.kafka.poll.timeout";
public static final PositiveLongValidator KAFKA_POLL_TIMEOUT_VALIDATOR = new
PositiveLongValidator(KAFKA_POLL_TIMEOUT,
- Long.MAX_VALUE);
+ Long.MAX_VALUE, null);
// TODO: We need to add a feature that enables storage plugins to add their
own options. Currently we have to declare
// in core which is not right. Move this option and above two mongo plugin
related options once we have the feature.
@@ -413,39 +439,47 @@ private ExecConstants() {
public static final String HIVE_OPTIMIZE_SCAN_WITH_NATIVE_READERS =
"store.hive.optimize_scan_with_native_readers";
@Deprecated // TODO: DRILL-6527. It should be removed starting from next
Drill 1.15.0 release
public static final OptionValidator
HIVE_OPTIMIZE_SCAN_WITH_NATIVE_READERS_VALIDATOR =
- new BooleanValidator(HIVE_OPTIMIZE_SCAN_WITH_NATIVE_READERS);
+ new BooleanValidator(HIVE_OPTIMIZE_SCAN_WITH_NATIVE_READERS, null);
public static final String HIVE_OPTIMIZE_PARQUET_SCAN_WITH_NATIVE_READER =
"store.hive.parquet.optimize_scan_with_native_reader";
public static final OptionValidator
HIVE_OPTIMIZE_PARQUET_SCAN_WITH_NATIVE_READER_VALIDATOR =
- new BooleanValidator(HIVE_OPTIMIZE_PARQUET_SCAN_WITH_NATIVE_READER);
+ new BooleanValidator(HIVE_OPTIMIZE_PARQUET_SCAN_WITH_NATIVE_READER,
+ new OptionDescription("Optimize reads of Parquet-backed external
tables from Hive by using Drill native readers instead of the Hive Serde
interface. (Drill 1.2 and later)"));
public static final String HIVE_OPTIMIZE_MAPRDB_JSON_SCAN_WITH_NATIVE_READER
= "store.hive.maprdb_json.optimize_scan_with_native_reader";
public static final OptionValidator
HIVE_OPTIMIZE_MAPRDB_JSON_SCAN_WITH_NATIVE_READER_VALIDATOR =
- new BooleanValidator(HIVE_OPTIMIZE_MAPRDB_JSON_SCAN_WITH_NATIVE_READER);
+ new BooleanValidator(HIVE_OPTIMIZE_MAPRDB_JSON_SCAN_WITH_NATIVE_READER,
null);
public static final String HIVE_CONF_PROPERTIES =
"store.hive.conf.properties";
- public static final OptionValidator HIVE_CONF_PROPERTIES_VALIDATOR = new
StringValidator(HIVE_CONF_PROPERTIES);
+ public static final OptionValidator HIVE_CONF_PROPERTIES_VALIDATOR = new
StringValidator(HIVE_CONF_PROPERTIES, null);
public static final String SLICE_TARGET = "planner.slice_target";
- public static final long SLICE_TARGET_DEFAULT = 100000l;
- public static final PositiveLongValidator SLICE_TARGET_OPTION = new
PositiveLongValidator(SLICE_TARGET, Long.MAX_VALUE);
+ public static final long SLICE_TARGET_DEFAULT = 100000L;
+ public static final PositiveLongValidator SLICE_TARGET_OPTION = new
PositiveLongValidator(SLICE_TARGET, Long.MAX_VALUE,
+ new OptionDescription("The number of records manipulated within a
fragment before Drill parallelizes operations."));
public static final String CAST_TO_NULLABLE_NUMERIC =
"drill.exec.functions.cast_empty_string_to_null";
- public static final BooleanValidator CAST_TO_NULLABLE_NUMERIC_OPTION = new
BooleanValidator(CAST_TO_NULLABLE_NUMERIC);
+ public static final BooleanValidator CAST_TO_NULLABLE_NUMERIC_OPTION = new
BooleanValidator(CAST_TO_NULLABLE_NUMERIC,
+ new OptionDescription("In a text file, treat empty fields as NULL values
instead of empty string."));
/**
* HashTable runtime settings
*/
public static final String MIN_HASH_TABLE_SIZE_KEY =
"exec.min_hash_table_size";
- public static final PositiveLongValidator MIN_HASH_TABLE_SIZE = new
PositiveLongValidator(MIN_HASH_TABLE_SIZE_KEY, HashTable.MAXIMUM_CAPACITY);
+ public static final PositiveLongValidator MIN_HASH_TABLE_SIZE = new
PositiveLongValidator(MIN_HASH_TABLE_SIZE_KEY, HashTable.MAXIMUM_CAPACITY,
+      new OptionDescription("Starting size in buckets for hash tables. Increase
according to available memory to improve performance. Increase for very large
aggregations or joins when you have large amounts of memory for Drill to use.
Range: 0 - 1073741824."));
public static final String MAX_HASH_TABLE_SIZE_KEY =
"exec.max_hash_table_size";
- public static final PositiveLongValidator MAX_HASH_TABLE_SIZE = new
PositiveLongValidator(MAX_HASH_TABLE_SIZE_KEY, HashTable.MAXIMUM_CAPACITY);
+ public static final PositiveLongValidator MAX_HASH_TABLE_SIZE = new
PositiveLongValidator(MAX_HASH_TABLE_SIZE_KEY, HashTable.MAXIMUM_CAPACITY,
+ new OptionDescription("Ending size in buckets for hash tables. Range: 0
- 1073741824."));
  /**
   * Limits the maximum level of parallelization to this factor times the
number of Drillbits
   */
public static final String CPU_LOAD_AVERAGE_KEY = "planner.cpu_load_average";
- public static final DoubleValidator CPU_LOAD_AVERAGE = new
DoubleValidator(CPU_LOAD_AVERAGE_KEY);
+ public static final DoubleValidator CPU_LOAD_AVERAGE = new
DoubleValidator(CPU_LOAD_AVERAGE_KEY,
+      new OptionDescription("Limits the maximum level of parallelization to
this factor times the number of Drillbits."));
public static final String MAX_WIDTH_PER_NODE_KEY =
"planner.width.max_per_node";
- public static final MaxWidthValidator MAX_WIDTH_PER_NODE = new
MaxWidthValidator(MAX_WIDTH_PER_NODE_KEY);
+ public static final MaxWidthValidator MAX_WIDTH_PER_NODE = new
MaxWidthValidator(MAX_WIDTH_PER_NODE_KEY,
+ new OptionDescription("Maximum number of threads that can run in
parallel for a query on a node. A slice is an individual thread. This number
indicates the maximum number of slices per query for the query's major fragment
on a node.",
+ "Max number of threads that can run in parallel for a query on a
node."));
/**
* The maximum level or parallelization any stage of the query can do. Note
that while this
@@ -453,22 +487,25 @@ private ExecConstants() {
* number of we want to do things like speed results return.
*/
public static final String MAX_WIDTH_GLOBAL_KEY =
"planner.width.max_per_query";
- public static final OptionValidator MAX_WIDTH_GLOBAL = new
PositiveLongValidator(MAX_WIDTH_GLOBAL_KEY, Integer.MAX_VALUE);
+ public static final OptionValidator MAX_WIDTH_GLOBAL = new
PositiveLongValidator(MAX_WIDTH_GLOBAL_KEY, Integer.MAX_VALUE,
+ new OptionDescription("Same as max per node but applies to the query as
executed by the entire cluster. For example, this value might be the number of
active Drillbits, or a higher number to return results faster."));
/**
* Factor by which a node with endpoint affinity will be favored while
creating assignment
*/
public static final String AFFINITY_FACTOR_KEY = "planner.affinity_factor";
- public static final OptionValidator AFFINITY_FACTOR = new
DoubleValidator(AFFINITY_FACTOR_KEY);
+ public static final OptionValidator AFFINITY_FACTOR = new
DoubleValidator(AFFINITY_FACTOR_KEY,
+ new OptionDescription("Factor by which a node with endpoint affinity
will be favored while creating assignment"));
public static final String EARLY_LIMIT0_OPT_KEY =
"planner.enable_limit0_optimization";
- public static final BooleanValidator EARLY_LIMIT0_OPT = new
BooleanValidator(EARLY_LIMIT0_OPT_KEY);
+ public static final BooleanValidator EARLY_LIMIT0_OPT = new
BooleanValidator(EARLY_LIMIT0_OPT_KEY, null);
public static final String LATE_LIMIT0_OPT_KEY =
"planner.enable_limit0_on_scan";
- public static final BooleanValidator LATE_LIMIT0_OPT = new
BooleanValidator(LATE_LIMIT0_OPT_KEY);
+ public static final BooleanValidator LATE_LIMIT0_OPT = new
BooleanValidator(LATE_LIMIT0_OPT_KEY, null);
public static final String ENABLE_MEMORY_ESTIMATION_KEY =
"planner.memory.enable_memory_estimation";
- public static final OptionValidator ENABLE_MEMORY_ESTIMATION = new
BooleanValidator(ENABLE_MEMORY_ESTIMATION_KEY);
+ public static final OptionValidator ENABLE_MEMORY_ESTIMATION = new
BooleanValidator(ENABLE_MEMORY_ESTIMATION_KEY,
+ new OptionDescription("Toggles the state of memory estimation and
re-planning of the query. When enabled, Drill conservatively estimates memory
requirements and typically excludes these operators from the plan and
negatively impacts performance."));
/**
* Maximum query memory per node (in MB). Re-plan with cheaper operators if
@@ -477,7 +514,8 @@ private ExecConstants() {
* DEFAULT: 2048 MB
*/
public static final String MAX_QUERY_MEMORY_PER_NODE_KEY =
"planner.memory.max_query_memory_per_node";
- public static final LongValidator MAX_QUERY_MEMORY_PER_NODE = new
RangeLongValidator(MAX_QUERY_MEMORY_PER_NODE_KEY, 1024 * 1024,
DrillConfig.getMaxDirectMemory());
+ public static final LongValidator MAX_QUERY_MEMORY_PER_NODE = new
RangeLongValidator(MAX_QUERY_MEMORY_PER_NODE_KEY, 1024 * 1024,
DrillConfig.getMaxDirectMemory(),
+ new OptionDescription("Sets the maximum amount of direct memory
allocated to the Sort and Hash Aggregate operators during each query on a node.
This memory is split between operators. If a query plan contains multiple Sort
and/or Hash Aggregate operators, the memory is divided between them. The
default limit should be increased for queries on large data sets."));
/**
* Alternative way to compute per-query-per-node memory as a percent
@@ -505,7 +543,7 @@ private ExecConstants() {
public static String PERCENT_MEMORY_PER_QUERY_KEY =
"planner.memory.percent_per_query";
public static DoubleValidator PERCENT_MEMORY_PER_QUERY = new
RangeDoubleValidator(
- PERCENT_MEMORY_PER_QUERY_KEY, 0, 1.0);
+ PERCENT_MEMORY_PER_QUERY_KEY, 0, 1.0, new OptionDescription("Sets the
memory as a percentage of the total direct memory."));
/**
* Minimum memory allocated to each buffered operator instance.
@@ -513,7 +551,8 @@ private ExecConstants() {
* DEFAULT: 40 MB
*/
public static final String MIN_MEMORY_PER_BUFFERED_OP_KEY =
"planner.memory.min_memory_per_buffered_op";
- public static final LongValidator MIN_MEMORY_PER_BUFFERED_OP = new
RangeLongValidator(MIN_MEMORY_PER_BUFFERED_OP_KEY, 1024 * 1024, Long.MAX_VALUE);
+ public static final LongValidator MIN_MEMORY_PER_BUFFERED_OP = new
RangeLongValidator(MIN_MEMORY_PER_BUFFERED_OP_KEY, 1024 * 1024, Long.MAX_VALUE,
+ new OptionDescription("Minimum memory allocated to each buffered
operator instance"));
/**
* Extra query memory per node for non-blocking operators.
@@ -524,16 +563,20 @@ private ExecConstants() {
*/
public static final String NON_BLOCKING_OPERATORS_MEMORY_KEY =
"planner.memory.non_blocking_operators_memory";
public static final OptionValidator NON_BLOCKING_OPERATORS_MEMORY = new
PowerOfTwoLongValidator(
- NON_BLOCKING_OPERATORS_MEMORY_KEY, 1 << 11);
+ NON_BLOCKING_OPERATORS_MEMORY_KEY, 1 << 11,
+ new OptionDescription("Extra query memory per node for non-blocking
operators. This option is currently used only for memory estimation. Range:
0-2048 MB"));
public static final String HASH_JOIN_TABLE_FACTOR_KEY =
"planner.memory.hash_join_table_factor";
- public static final OptionValidator HASH_JOIN_TABLE_FACTOR = new
DoubleValidator(HASH_JOIN_TABLE_FACTOR_KEY);
+ public static final OptionValidator HASH_JOIN_TABLE_FACTOR = new
DoubleValidator(HASH_JOIN_TABLE_FACTOR_KEY,
+      new OptionDescription("A heuristic value for influencing the size of the
hash join table."));
public static final String HASH_AGG_TABLE_FACTOR_KEY =
"planner.memory.hash_agg_table_factor";
- public static final OptionValidator HASH_AGG_TABLE_FACTOR = new
DoubleValidator(HASH_AGG_TABLE_FACTOR_KEY);
+ public static final OptionValidator HASH_AGG_TABLE_FACTOR = new
DoubleValidator(HASH_AGG_TABLE_FACTOR_KEY,
+ new OptionDescription("A heuristic value for influencing the size of the
hash aggregation table."));
public static final String AVERAGE_FIELD_WIDTH_KEY =
"planner.memory.average_field_width";
- public static final OptionValidator AVERAGE_FIELD_WIDTH = new
PositiveLongValidator(AVERAGE_FIELD_WIDTH_KEY, Long.MAX_VALUE);
+ public static final OptionValidator AVERAGE_FIELD_WIDTH = new
PositiveLongValidator(AVERAGE_FIELD_WIDTH_KEY, Long.MAX_VALUE,
+ new OptionDescription("Used in estimating memory requirements."));
// Mux Exchange options.
public static final String ORDERED_MUX_EXCHANGE =
"planner.enable_ordered_mux_exchange";
@@ -548,45 +591,55 @@ private ExecConstants() {
// Enables queues. When running embedded, enables an in-process queue. When
// running distributed, enables the Zookeeper-based distributed queue.
- public static final BooleanValidator ENABLE_QUEUE = new
BooleanValidator("exec.queue.enable");
- public static final LongValidator LARGE_QUEUE_SIZE = new
PositiveLongValidator("exec.queue.large", 10_000);
- public static final LongValidator SMALL_QUEUE_SIZE = new
PositiveLongValidator("exec.queue.small", 100_000);
- public static final LongValidator QUEUE_THRESHOLD_SIZE = new
PositiveLongValidator("exec.queue.threshold", Long.MAX_VALUE);
- public static final LongValidator QUEUE_TIMEOUT = new
PositiveLongValidator("exec.queue.timeout_millis", Long.MAX_VALUE);
+ public static final BooleanValidator ENABLE_QUEUE = new
BooleanValidator("exec.queue.enable",
+ new OptionDescription("Changes the state of query queues. False allows
unlimited concurrent queries."));
+ public static final LongValidator LARGE_QUEUE_SIZE = new
PositiveLongValidator("exec.queue.large", 10_000,
+ new OptionDescription("Sets the number of large queries that can run
concurrently in the cluster. Range: 0-1000"));
+ public static final LongValidator SMALL_QUEUE_SIZE = new
PositiveLongValidator("exec.queue.small", 100_000,
+ new OptionDescription("Sets the number of small queries that can run
concurrently in the cluster. Range: 0-1001"));
+ public static final LongValidator QUEUE_THRESHOLD_SIZE = new
PositiveLongValidator("exec.queue.threshold", Long.MAX_VALUE,
+ new OptionDescription("Sets the cost threshold, which depends on the
complexity of the queries in queue, for determining whether query is large or
small. Complex queries have higher thresholds. Range: 0-9223372036854775807"));
+ public static final LongValidator QUEUE_TIMEOUT = new
PositiveLongValidator("exec.queue.timeout_millis", Long.MAX_VALUE,
+ new OptionDescription("Indicates how long a query can wait in queue
before the query fails. Range: 0-9223372036854775807"));
// Ratio of memory for small queries vs. large queries.
// Each small query gets 1 unit, each large query gets QUEUE_MEMORY_RATIO
units.
// A lower limit of 1 enforces the intuition that a large query should never
get
// *less* memory than a small one.
- public static final DoubleValidator QUEUE_MEMORY_RATIO = new
RangeDoubleValidator("exec.queue.memory_ratio", 1.0, 1000);
+ public static final DoubleValidator QUEUE_MEMORY_RATIO = new
RangeDoubleValidator("exec.queue.memory_ratio", 1.0, 1000, null);
- public static final DoubleValidator QUEUE_MEMORY_RESERVE = new
RangeDoubleValidator("exec.queue.memory_reserve_ratio", 0, 1.0);
+ public static final DoubleValidator QUEUE_MEMORY_RESERVE = new
RangeDoubleValidator("exec.queue.memory_reserve_ratio", 0, 1.0, null);
public static final String ENABLE_VERBOSE_ERRORS_KEY = "exec.errors.verbose";
- public static final OptionValidator ENABLE_VERBOSE_ERRORS = new
BooleanValidator(ENABLE_VERBOSE_ERRORS_KEY);
+ public static final OptionValidator ENABLE_VERBOSE_ERRORS = new
BooleanValidator(ENABLE_VERBOSE_ERRORS_KEY,
+ new OptionDescription("Toggles verbose output of executable error
messages"));
public static final String ENABLE_NEW_TEXT_READER_KEY =
"exec.storage.enable_new_text_reader";
- public static final OptionValidator ENABLE_NEW_TEXT_READER = new
BooleanValidator(ENABLE_NEW_TEXT_READER_KEY);
+ public static final OptionValidator ENABLE_NEW_TEXT_READER = new
BooleanValidator(ENABLE_NEW_TEXT_READER_KEY,
+ new OptionDescription("Enables the text reader that complies with the
RFC 4180 standard for text/csv files."));
public static final String BOOTSTRAP_STORAGE_PLUGINS_FILE =
"bootstrap-storage-plugins.json";
public static final String DRILL_SYS_FILE_SUFFIX = ".sys.drill";
public static final String ENABLE_WINDOW_FUNCTIONS = "window.enable";
- public static final OptionValidator ENABLE_WINDOW_FUNCTIONS_VALIDATOR = new
BooleanValidator(ENABLE_WINDOW_FUNCTIONS);
+ public static final OptionValidator ENABLE_WINDOW_FUNCTIONS_VALIDATOR = new
BooleanValidator(ENABLE_WINDOW_FUNCTIONS,
+ new OptionDescription("Enable or disable window functions in Drill 1.1
and later."));
public static final String DRILLBIT_CONTROL_INJECTIONS =
"drill.exec.testing.controls";
- public static final OptionValidator DRILLBIT_CONTROLS_VALIDATOR = new
ExecutionControls.ControlsOptionValidator(DRILLBIT_CONTROL_INJECTIONS, 1);
+ public static final OptionValidator DRILLBIT_CONTROLS_VALIDATOR = new
ExecutionControls.ControlsOptionValidator(DRILLBIT_CONTROL_INJECTIONS, 1, null);
public static final String NEW_VIEW_DEFAULT_PERMS_KEY =
"new_view_default_permissions";
- public static final OptionValidator NEW_VIEW_DEFAULT_PERMS_VALIDATOR = new
StringValidator(NEW_VIEW_DEFAULT_PERMS_KEY);
+ public static final OptionValidator NEW_VIEW_DEFAULT_PERMS_VALIDATOR = new
StringValidator(NEW_VIEW_DEFAULT_PERMS_KEY,
+ new OptionDescription("Sets view permissions using an octal code in the
Unix tradition."));
public static final String CTAS_PARTITIONING_HASH_DISTRIBUTE =
"store.partition.hash_distribute";
- public static final BooleanValidator
CTAS_PARTITIONING_HASH_DISTRIBUTE_VALIDATOR = new
BooleanValidator(CTAS_PARTITIONING_HASH_DISTRIBUTE);
+ public static final BooleanValidator
CTAS_PARTITIONING_HASH_DISTRIBUTE_VALIDATOR = new
BooleanValidator(CTAS_PARTITIONING_HASH_DISTRIBUTE,
+ new OptionDescription("Uses a hash algorithm to distribute data on
partition keys in a CTAS partitioning operation. An alpha option--for
experimental use at this stage. Do not use in production systems."));
public static final String ENABLE_BULK_LOAD_TABLE_LIST_KEY =
"exec.enable_bulk_load_table_list";
- public static final BooleanValidator ENABLE_BULK_LOAD_TABLE_LIST = new
BooleanValidator(ENABLE_BULK_LOAD_TABLE_LIST_KEY);
+ public static final BooleanValidator ENABLE_BULK_LOAD_TABLE_LIST = new
BooleanValidator(ENABLE_BULK_LOAD_TABLE_LIST_KEY, null);
/**
* When getting Hive Table information with exec.enable_bulk_load_table_list
set to true,
@@ -594,21 +647,21 @@ private ExecConstants() {
* at a time. (The number of tables can get to be quite large.)
*/
public static final String BULK_LOAD_TABLE_LIST_BULK_SIZE_KEY =
"exec.bulk_load_table_list.bulk_size";
- public static final PositiveLongValidator BULK_LOAD_TABLE_LIST_BULK_SIZE =
new PositiveLongValidator(BULK_LOAD_TABLE_LIST_BULK_SIZE_KEY,
Integer.MAX_VALUE);
+ public static final PositiveLongValidator BULK_LOAD_TABLE_LIST_BULK_SIZE =
new PositiveLongValidator(BULK_LOAD_TABLE_LIST_BULK_SIZE_KEY,
Integer.MAX_VALUE, null);
/**
* Option whose value is a comma separated list of admin usernames. Admin
users are users who have special privileges
* such as changing system options.
*/
public static final String ADMIN_USERS_KEY = "security.admin.users";
- public static final AdminUsersValidator ADMIN_USERS_VALIDATOR = new
AdminUsersValidator(ADMIN_USERS_KEY);
+ public static final AdminUsersValidator ADMIN_USERS_VALIDATOR = new
AdminUsersValidator(ADMIN_USERS_KEY, null);
/**
* Option whose value is a comma separated list of admin usergroups.
*/
public static final String ADMIN_USER_GROUPS_KEY =
"security.admin.user_groups";
public static final AdminUserGroupsValidator ADMIN_USER_GROUPS_VALIDATOR =
- new AdminUserGroupsValidator(ADMIN_USER_GROUPS_KEY);
+ new AdminUserGroupsValidator(ADMIN_USER_GROUPS_KEY, null);
/**
* Option whose value is a string representing list of inbound impersonation
policies.
*
@@ -630,24 +683,25 @@ private ExecConstants() {
* Web settings
*/
public static final String WEB_LOGS_MAX_LINES = "web.logs.max_lines";
- public static final OptionValidator WEB_LOGS_MAX_LINES_VALIDATOR = new
PositiveLongValidator(WEB_LOGS_MAX_LINES, Integer.MAX_VALUE);
+ public static final OptionValidator WEB_LOGS_MAX_LINES_VALIDATOR = new
PositiveLongValidator(WEB_LOGS_MAX_LINES, Integer.MAX_VALUE, null);
public static final String CODE_GEN_EXP_IN_METHOD_SIZE =
"exec.java.compiler.exp_in_method_size";
- public static final LongValidator CODE_GEN_EXP_IN_METHOD_SIZE_VALIDATOR =
new LongValidator(CODE_GEN_EXP_IN_METHOD_SIZE);
+ public static final LongValidator CODE_GEN_EXP_IN_METHOD_SIZE_VALIDATOR =
new LongValidator(CODE_GEN_EXP_IN_METHOD_SIZE,
+ new OptionDescription("Introduced in Drill 1.8. For queries with complex
or multiple expressions in the query logic, this option limits the number of
expressions allowed in each method to prevent Drill from generating code that
exceeds the Java limit of 64K bytes. If a method approaches the 64K limit, the
Java compiler returns a message stating that the code is too large to compile.
If queries return such a message, reduce the value of this option at the
session level. The default value for this option is 50. The value is the count
of expressions allowed in a method. Expressions are added to a method until
they hit the Java 64K limit, when a new inner method is created and called from
the existing method. Note: This logic has not been implemented for all
operators. If a query uses operators for which the logic is not implemented,
reducing the setting for this option may not resolve the error. Setting this
option at the system level impacts all queries and can degrade query
performance."));
public static final String CREATE_PREPARE_STATEMENT_TIMEOUT_MILLIS =
"prepare.statement.create_timeout_ms";
public static final OptionValidator
CREATE_PREPARE_STATEMENT_TIMEOUT_MILLIS_VALIDATOR =
- new PositiveLongValidator(CREATE_PREPARE_STATEMENT_TIMEOUT_MILLIS,
Integer.MAX_VALUE);
+ new PositiveLongValidator(CREATE_PREPARE_STATEMENT_TIMEOUT_MILLIS,
Integer.MAX_VALUE, null);
public static final String DYNAMIC_UDF_SUPPORT_ENABLED =
"exec.udf.enable_dynamic_support";
- public static final BooleanValidator DYNAMIC_UDF_SUPPORT_ENABLED_VALIDATOR =
new BooleanValidator(DYNAMIC_UDF_SUPPORT_ENABLED);
+ public static final BooleanValidator DYNAMIC_UDF_SUPPORT_ENABLED_VALIDATOR =
new BooleanValidator(DYNAMIC_UDF_SUPPORT_ENABLED, null);
/**
* Option to save query profiles. If false, no query profile will be saved
* for any query.
*/
public static final String ENABLE_QUERY_PROFILE_OPTION =
"exec.query_profile.save";
- public static final BooleanValidator ENABLE_QUERY_PROFILE_VALIDATOR = new
BooleanValidator(ENABLE_QUERY_PROFILE_OPTION);
+ public static final BooleanValidator ENABLE_QUERY_PROFILE_VALIDATOR = new
BooleanValidator(ENABLE_QUERY_PROFILE_OPTION, null);
/**
* Profiles are normally written after the last client message to reduce
latency.
@@ -656,16 +710,16 @@ private ExecConstants() {
* verification.
*/
public static final String QUERY_PROFILE_DEBUG_OPTION =
"exec.query_profile.debug_mode";
- public static final BooleanValidator QUERY_PROFILE_DEBUG_VALIDATOR = new
BooleanValidator(QUERY_PROFILE_DEBUG_OPTION);
+ public static final BooleanValidator QUERY_PROFILE_DEBUG_VALIDATOR = new
BooleanValidator(QUERY_PROFILE_DEBUG_OPTION, null);
public static final String USE_DYNAMIC_UDFS_KEY = "exec.udf.use_dynamic";
- public static final BooleanValidator USE_DYNAMIC_UDFS = new
BooleanValidator(USE_DYNAMIC_UDFS_KEY);
+ public static final BooleanValidator USE_DYNAMIC_UDFS = new
BooleanValidator(USE_DYNAMIC_UDFS_KEY, null);
public static final String QUERY_TRANSIENT_STATE_UPDATE_KEY =
"exec.query.progress.update";
- public static final BooleanValidator QUERY_TRANSIENT_STATE_UPDATE = new
BooleanValidator(QUERY_TRANSIENT_STATE_UPDATE_KEY);
+ public static final BooleanValidator QUERY_TRANSIENT_STATE_UPDATE = new
BooleanValidator(QUERY_TRANSIENT_STATE_UPDATE_KEY, null);
public static final String PERSISTENT_TABLE_UMASK =
"exec.persistent_table.umask";
- public static final StringValidator PERSISTENT_TABLE_UMASK_VALIDATOR = new
StringValidator(PERSISTENT_TABLE_UMASK);
+ public static final StringValidator PERSISTENT_TABLE_UMASK_VALIDATOR = new
StringValidator(PERSISTENT_TABLE_UMASK, null);
/**
* Enables batch iterator (operator) validation. Validation is normally
enabled
@@ -674,7 +728,7 @@ private ExecConstants() {
* a "production" Drill instance.
*/
public static final String ENABLE_ITERATOR_VALIDATION_OPTION =
"debug.validate_iterators";
- public static final BooleanValidator ENABLE_ITERATOR_VALIDATOR = new
BooleanValidator(ENABLE_ITERATOR_VALIDATION_OPTION);
+ public static final BooleanValidator ENABLE_ITERATOR_VALIDATOR = new
BooleanValidator(ENABLE_ITERATOR_VALIDATION_OPTION, null);
/**
* Boot-time config option to enable validation. Primarily used for tests.
@@ -688,7 +742,7 @@ private ExecConstants() {
* each batch passed to each iterator.
*/
public static final String ENABLE_VECTOR_VALIDATION_OPTION =
"debug.validate_vectors";
- public static final BooleanValidator ENABLE_VECTOR_VALIDATOR = new
BooleanValidator(ENABLE_VECTOR_VALIDATION_OPTION);
+ public static final BooleanValidator ENABLE_VECTOR_VALIDATOR = new
BooleanValidator(ENABLE_VECTOR_VALIDATION_OPTION, null);
/**
* Boot-time config option to enable vector validation. Primarily used for
@@ -719,16 +773,16 @@ public static String bootDefaultFor(String name) {
/** Enables batch size statistics logging */
public static final String STATS_LOGGING_BATCH_SIZE_OPTION =
"drill.exec.stats.logging.batch_size";
- public static final BooleanValidator STATS_LOGGING_BATCH_SIZE_VALIDATOR =
new BooleanValidator(STATS_LOGGING_BATCH_SIZE_OPTION);
+ public static final BooleanValidator STATS_LOGGING_BATCH_SIZE_VALIDATOR =
new BooleanValidator(STATS_LOGGING_BATCH_SIZE_OPTION, null);
/** Enables fine-grained batch size statistics logging */
public static final String STATS_LOGGING_FG_BATCH_SIZE_OPTION =
"drill.exec.stats.logging.fine_grained.batch_size";
- public static final BooleanValidator STATS_LOGGING_BATCH_FG_SIZE_VALIDATOR =
new BooleanValidator(STATS_LOGGING_FG_BATCH_SIZE_OPTION);
+ public static final BooleanValidator STATS_LOGGING_BATCH_FG_SIZE_VALIDATOR =
new BooleanValidator(STATS_LOGGING_FG_BATCH_SIZE_OPTION, null);
/** Controls the list of operators for which batch sizing stats should be
enabled */
public static final String STATS_LOGGING_BATCH_OPERATOR_OPTION =
"drill.exec.stats.logging.enabled_operators";
- public static final StringValidator STATS_LOGGING_BATCH_OPERATOR_VALIDATOR =
new StringValidator(STATS_LOGGING_BATCH_OPERATOR_OPTION);
+ public static final StringValidator STATS_LOGGING_BATCH_OPERATOR_VALIDATOR =
new StringValidator(STATS_LOGGING_BATCH_OPERATOR_OPTION, null);
public static final String LIST_FILES_RECURSIVELY =
"storage.list_files_recursively";
- public static final BooleanValidator LIST_FILES_RECURSIVELY_VALIDATOR = new
BooleanValidator(LIST_FILES_RECURSIVELY);
+ public static final BooleanValidator LIST_FILES_RECURSIVELY_VALIDATOR = new
BooleanValidator(LIST_FILES_RECURSIVELY, null);
}
diff --git
a/exec/java-exec/src/main/java/org/apache/drill/exec/compile/ClassCompilerSelector.java
b/exec/java-exec/src/main/java/org/apache/drill/exec/compile/ClassCompilerSelector.java
index 5255c530937..64ede6c070a 100644
---
a/exec/java-exec/src/main/java/org/apache/drill/exec/compile/ClassCompilerSelector.java
+++
b/exec/java-exec/src/main/java/org/apache/drill/exec/compile/ClassCompilerSelector.java
@@ -28,6 +28,7 @@
import org.apache.drill.exec.server.options.OptionMetaData;
import org.apache.drill.exec.server.options.OptionSet;
import org.apache.drill.exec.server.options.OptionValidator;
+import org.apache.drill.exec.server.options.OptionValidator.OptionDescription;
import org.apache.drill.exec.server.options.OptionValue;
import org.apache.drill.exec.server.options.TypeValidators.BooleanValidator;
import org.apache.drill.exec.server.options.TypeValidators.LongValidator;
@@ -76,12 +77,15 @@
public static final String JAVA_COMPILER_OPTION = "exec.java_compiler";
public static final String JAVA_COMPILER_JANINO_MAXSIZE_OPTION =
"exec.java_compiler_janino_maxsize";
- public static final OptionValidator JAVA_COMPILER_JANINO_MAXSIZE = new
LongValidator(JAVA_COMPILER_JANINO_MAXSIZE_OPTION);
+ public static final OptionValidator JAVA_COMPILER_JANINO_MAXSIZE = new
LongValidator(JAVA_COMPILER_JANINO_MAXSIZE_OPTION,
+ new OptionDescription("See the exec.java_compiler option comment.
Accepts inputs of type LONG."));
public static final String JAVA_COMPILER_DEBUG_OPTION =
"exec.java_compiler_debug";
- public static final OptionValidator JAVA_COMPILER_DEBUG = new
BooleanValidator(JAVA_COMPILER_DEBUG_OPTION);
+ public static final OptionValidator JAVA_COMPILER_DEBUG = new
BooleanValidator(JAVA_COMPILER_DEBUG_OPTION,
+ new OptionDescription("Toggles the output of debug-level compiler error
messages in runtime generated code."));
- public static final StringValidator JAVA_COMPILER_VALIDATOR = new
StringValidator(JAVA_COMPILER_OPTION) {
+ public static final StringValidator JAVA_COMPILER_VALIDATOR = new
StringValidator(JAVA_COMPILER_OPTION,
+ new OptionDescription("Switches between DEFAULT, JDK, and JANINO mode
for the current session. Uses Janino by default for generated source code of
less than exec.java_compiler_janino_maxsize; otherwise, switches to the JDK
compiler.")) {
@Override
public void validate(final OptionValue v, final OptionMetaData metaData,
final OptionSet manager) {
super.validate(v, metaData, manager);
diff --git
a/exec/java-exec/src/main/java/org/apache/drill/exec/compile/ClassTransformer.java
b/exec/java-exec/src/main/java/org/apache/drill/exec/compile/ClassTransformer.java
index ab3e771da6a..05548da997f 100644
---
a/exec/java-exec/src/main/java/org/apache/drill/exec/compile/ClassTransformer.java
+++
b/exec/java-exec/src/main/java/org/apache/drill/exec/compile/ClassTransformer.java
@@ -61,7 +61,7 @@
public final static String SCALAR_REPLACEMENT_OPTION =
"org.apache.drill.exec.compile.ClassTransformer.scalar_replacement";
public final static EnumeratedStringValidator SCALAR_REPLACEMENT_VALIDATOR =
new EnumeratedStringValidator(
- SCALAR_REPLACEMENT_OPTION, "try", "off", "try", "on");
+ SCALAR_REPLACEMENT_OPTION, null, "try", "off", "try", "on");
@VisibleForTesting // although we need it even if it weren't used in testing
public enum ScalarReplacementOption {
diff --git
a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/physical/PlannerSettings.java
b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/physical/PlannerSettings.java
index 5a40ae42f31..46eb3f77528 100644
---
a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/physical/PlannerSettings.java
+++
b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/physical/PlannerSettings.java
@@ -23,6 +23,7 @@
import org.apache.drill.exec.expr.fn.FunctionImplementationRegistry;
import org.apache.drill.exec.server.options.OptionManager;
import org.apache.drill.exec.server.options.OptionValidator;
+import org.apache.drill.exec.server.options.OptionValidator.OptionDescription;
import org.apache.drill.exec.server.options.TypeValidators.BooleanValidator;
import
org.apache.drill.exec.server.options.TypeValidators.EnumeratedStringValidator;
import org.apache.drill.exec.server.options.TypeValidators.LongValidator;
@@ -51,72 +52,102 @@
// max off heap memory for planning (16G)
private static final long MAX_OFF_HEAP_ALLOCATION_IN_BYTES = 16l * 1024 *
1024 * 1024;
- public static final OptionValidator CONSTANT_FOLDING = new
BooleanValidator("planner.enable_constant_folding");
- public static final OptionValidator EXCHANGE = new
BooleanValidator("planner.disable_exchanges");
- public static final OptionValidator HASHAGG = new
BooleanValidator("planner.enable_hashagg");
- public static final OptionValidator STREAMAGG = new
BooleanValidator("planner.enable_streamagg");
- public static final OptionValidator TOPN = new
BooleanValidator("planner.enable_topn");
- public static final OptionValidator HASHJOIN = new
BooleanValidator("planner.enable_hashjoin");
- public static final OptionValidator MERGEJOIN = new
BooleanValidator("planner.enable_mergejoin");
- public static final OptionValidator NESTEDLOOPJOIN = new
BooleanValidator("planner.enable_nestedloopjoin");
- public static final OptionValidator MULTIPHASE = new
BooleanValidator("planner.enable_multiphase_agg");
- public static final OptionValidator BROADCAST = new
BooleanValidator("planner.enable_broadcast_join");
- public static final OptionValidator BROADCAST_THRESHOLD = new
PositiveLongValidator("planner.broadcast_threshold", MAX_BROADCAST_THRESHOLD);
- public static final OptionValidator BROADCAST_FACTOR = new
RangeDoubleValidator("planner.broadcast_factor", 0, Double.MAX_VALUE);
- public static final OptionValidator NESTEDLOOPJOIN_FACTOR = new
RangeDoubleValidator("planner.nestedloopjoin_factor", 0, Double.MAX_VALUE);
- public static final OptionValidator NLJOIN_FOR_SCALAR = new
BooleanValidator("planner.enable_nljoin_for_scalar_only");
- public static final OptionValidator JOIN_ROW_COUNT_ESTIMATE_FACTOR = new
RangeDoubleValidator("planner.join.row_count_estimate_factor", 0,
Double.MAX_VALUE);
- public static final OptionValidator MUX_EXCHANGE = new
BooleanValidator("planner.enable_mux_exchange");
- public static final OptionValidator ORDERED_MUX_EXCHANGE = new
BooleanValidator("planner.enable_ordered_mux_exchange");
- public static final OptionValidator DEMUX_EXCHANGE = new
BooleanValidator("planner.enable_demux_exchange");
- public static final OptionValidator PARTITION_SENDER_THREADS_FACTOR = new
LongValidator("planner.partitioner_sender_threads_factor");
- public static final OptionValidator PARTITION_SENDER_MAX_THREADS = new
LongValidator("planner.partitioner_sender_max_threads");
- public static final OptionValidator PARTITION_SENDER_SET_THREADS = new
LongValidator("planner.partitioner_sender_set_threads");
- public static final OptionValidator PRODUCER_CONSUMER = new
BooleanValidator("planner.add_producer_consumer");
- public static final OptionValidator PRODUCER_CONSUMER_QUEUE_SIZE = new
LongValidator("planner.producer_consumer_queue_size");
- public static final OptionValidator HASH_SINGLE_KEY = new
BooleanValidator("planner.enable_hash_single_key");
- public static final OptionValidator HASH_JOIN_SWAP = new
BooleanValidator("planner.enable_hashjoin_swap");
- public static final OptionValidator HASH_JOIN_SWAP_MARGIN_FACTOR = new
RangeDoubleValidator("planner.join.hash_join_swap_margin_factor", 0, 100);
+ public static final OptionValidator CONSTANT_FOLDING = new
BooleanValidator("planner.enable_constant_folding",
+ new OptionDescription("If one side of a filter condition is a constant
expression, constant folding evaluates the expression in the planning phase and
replaces the expression with the constant value. For example, Drill can rewrite
WHERE age + 5 < 42 as WHERE age < 37."));
+ public static final OptionValidator EXCHANGE = new
BooleanValidator("planner.disable_exchanges",
+ new OptionDescription("Toggles the state of hashing to a random
exchange."));
+ public static final OptionValidator HASHAGG = new
BooleanValidator("planner.enable_hashagg",
+ new OptionDescription("Enable hash aggregation; otherwise, Drill does a
sort-based aggregation. Writes to disk. Enable is recommended."));
+ public static final OptionValidator STREAMAGG = new
BooleanValidator("planner.enable_streamagg",
+ new OptionDescription("Sort-based operation. Writes to disk."));
+ public static final OptionValidator TOPN = new
BooleanValidator("planner.enable_topn", null);
+ public static final OptionValidator HASHJOIN = new
BooleanValidator("planner.enable_hashjoin",
+ new OptionDescription("Enable the memory hungry hash join. Drill assumes
that a query will have adequate memory to complete and tries to use the fastest
operations possible to complete the planned inner, left, right, or full outer
joins using a hash table. Does not write to disk. Disabling hash join allows
Drill to manage arbitrarily large data in a small memory footprint."));
+ public static final OptionValidator MERGEJOIN = new
BooleanValidator("planner.enable_mergejoin",
+ new OptionDescription("Sort-based operation. A merge join is used for
inner join, left and right outer joins. Inputs to the merge join must be
sorted. It reads the sorted input streams from both sides and finds matching
rows. Writes to disk."));
+ public static final OptionValidator NESTEDLOOPJOIN = new
BooleanValidator("planner.enable_nestedloopjoin",
+ new OptionDescription("Sort-based operation. Writes to disk."));
+ public static final OptionValidator MULTIPHASE = new
BooleanValidator("planner.enable_multiphase_agg",
+ new OptionDescription("Each minor fragment does a local aggregation in
phase 1, distributes on a hash basis using GROUP-BY keys partially aggregated
results to other fragments, and all the fragments perform a total aggregation
using this data."));
+ public static final OptionValidator BROADCAST = new
BooleanValidator("planner.enable_broadcast_join",
+ new OptionDescription("Changes the state of aggregation and join
operators. The broadcast join can be used for hash join, merge join and nested
loop join. Use to join a large (fact) table to relatively smaller (dimension)
tables. Do not disable."));
+ public static final OptionValidator BROADCAST_THRESHOLD = new
PositiveLongValidator("planner.broadcast_threshold", MAX_BROADCAST_THRESHOLD,
+ new OptionDescription("The maximum number of records allowed to be
broadcast as part of a query. After one million records, Drill reshuffles data
rather than doing a broadcast to one side of the join. Range: 0-2147483647"));
+ public static final OptionValidator BROADCAST_FACTOR = new
RangeDoubleValidator("planner.broadcast_factor", 0, Double.MAX_VALUE,
+ new OptionDescription("A heuristic parameter for influencing the
broadcast of records as part of a query."));
+ public static final OptionValidator NESTEDLOOPJOIN_FACTOR = new
RangeDoubleValidator("planner.nestedloopjoin_factor", 0, Double.MAX_VALUE,
+ new OptionDescription("A heuristic value for influencing the nested loop
join."));
+ public static final OptionValidator NLJOIN_FOR_SCALAR = new
BooleanValidator("planner.enable_nljoin_for_scalar_only",
+ new OptionDescription("Supports nested loop join planning where the
right input is scalar in order to enable NOT-IN, Inequality, Cartesian, and
uncorrelated EXISTS planning."));
+ public static final OptionValidator JOIN_ROW_COUNT_ESTIMATE_FACTOR = new
RangeDoubleValidator("planner.join.row_count_estimate_factor", 0,
Double.MAX_VALUE,
+ new OptionDescription("The factor for adjusting the estimated row count
when considering multiple join order sequences during the planning phase."));
+ public static final OptionValidator MUX_EXCHANGE = new
BooleanValidator("planner.enable_mux_exchange",
+ new OptionDescription("Toggles the state of hashing to a multiplexed
exchange."));
+ public static final OptionValidator ORDERED_MUX_EXCHANGE = new
BooleanValidator("planner.enable_ordered_mux_exchange",
+ null);
+ public static final OptionValidator DEMUX_EXCHANGE = new
BooleanValidator("planner.enable_demux_exchange",
+ new OptionDescription("Toggles the state of hashing to a demulitplexed
exchange."));
+ public static final OptionValidator PARTITION_SENDER_THREADS_FACTOR = new
LongValidator("planner.partitioner_sender_threads_factor",
+ new OptionDescription("A heuristic param to use to influence final
number of threads. The higher the value the fewer the number of threads."));
+ public static final OptionValidator PARTITION_SENDER_MAX_THREADS = new
LongValidator("planner.partitioner_sender_max_threads",
+ new OptionDescription("Upper limit of threads for outbound queuing."));
+ public static final OptionValidator PARTITION_SENDER_SET_THREADS = new
LongValidator("planner.partitioner_sender_set_threads",
+ new OptionDescription("Overwrites the number of threads used to send out
batches of records. Set to -1 to disable. Typically not changed."));
+ public static final OptionValidator PRODUCER_CONSUMER = new
BooleanValidator("planner.add_producer_consumer",
+ new OptionDescription("Increase prefetching of data from disk. Disable
for in-memory reads."));
+ public static final OptionValidator PRODUCER_CONSUMER_QUEUE_SIZE = new
LongValidator("planner.producer_consumer_queue_size",
+ new OptionDescription("How much data to prefetch from disk in record
batches out-of-band of query execution. The larger the queue size, the greater
the amount of memory that the queue and overall query execution consumes."));
+ public static final OptionValidator HASH_SINGLE_KEY = new
BooleanValidator("planner.enable_hash_single_key",
+ new OptionDescription("Each hash key is associated with a single
value."));
+ public static final OptionValidator HASH_JOIN_SWAP = new
BooleanValidator("planner.enable_hashjoin_swap",
+ new OptionDescription("Enables consideration of multiple join order
sequences during the planning phase. Might negatively affect the performance of
some queries due to inaccuracy of estimated row count especially after a
filter, join, or aggregation."));
+ public static final OptionValidator HASH_JOIN_SWAP_MARGIN_FACTOR = new
RangeDoubleValidator("planner.join.hash_join_swap_margin_factor", 0, 100,
+ new OptionDescription("The number of join order sequences to consider
during the planning phase."));
public static final String ENABLE_DECIMAL_DATA_TYPE_KEY =
"planner.enable_decimal_data_type";
- public static final BooleanValidator ENABLE_DECIMAL_DATA_TYPE = new
BooleanValidator(ENABLE_DECIMAL_DATA_TYPE_KEY);
- public static final OptionValidator HEP_OPT = new
BooleanValidator("planner.enable_hep_opt");
- public static final OptionValidator HEP_PARTITION_PRUNING = new
BooleanValidator("planner.enable_hep_partition_pruning");
+ public static final BooleanValidator ENABLE_DECIMAL_DATA_TYPE = new
BooleanValidator(ENABLE_DECIMAL_DATA_TYPE_KEY,
+ new OptionDescription("False disables the DECIMAL data type, including
casting to DECIMAL and reading DECIMAL types from Parquet and Hive."));
+ public static final OptionValidator HEP_OPT = new
BooleanValidator("planner.enable_hep_opt", null);
+ public static final OptionValidator HEP_PARTITION_PRUNING = new
BooleanValidator("planner.enable_hep_partition_pruning", null);
public static final OptionValidator PLANNER_MEMORY_LIMIT = new
RangeLongValidator("planner.memory_limit",
- INITIAL_OFF_HEAP_ALLOCATION_IN_BYTES, MAX_OFF_HEAP_ALLOCATION_IN_BYTES);
+ INITIAL_OFF_HEAP_ALLOCATION_IN_BYTES, MAX_OFF_HEAP_ALLOCATION_IN_BYTES,
+ new OptionDescription("Defines the maximum amount of direct memory
allocated to a query for planning. When multiple queries run concurrently, each
query is allocated the amount of memory set by this parameter.Increase the
value of this parameter and rerun the query if partition pruning failed due to
insufficient memory."));
public static final String UNIONALL_DISTRIBUTE_KEY =
"planner.enable_unionall_distribute";
- public static final BooleanValidator UNIONALL_DISTRIBUTE = new
BooleanValidator(UNIONALL_DISTRIBUTE_KEY);
+ public static final BooleanValidator UNIONALL_DISTRIBUTE = new
BooleanValidator(UNIONALL_DISTRIBUTE_KEY, null);
public static final OptionValidator IDENTIFIER_MAX_LENGTH =
new RangeLongValidator("planner.identifier_max_length", 128 /* A minimum
length is needed because option names are identifiers themselves */,
- Integer.MAX_VALUE);
+ Integer.MAX_VALUE,
+ new OptionDescription("A minimum length is
needed because option names are identifiers themselves."));
public static final DoubleValidator FILTER_MIN_SELECTIVITY_ESTIMATE_FACTOR =
new
MinRangeDoubleValidator("planner.filter.min_selectivity_estimate_factor",
- 0.0, 1.0, "planner.filter.max_selectivity_estimate_factor");
+ 0.0, 1.0, "planner.filter.max_selectivity_estimate_factor",
+ new OptionDescription("Available as of Drill 1.8. Sets the minimum
filter selectivity estimate to increase the parallelization of the major
fragment performing a join. This option is useful for deeply nested queries
with complicated predicates and serves as a workaround when statistics are
insufficient or unavailable. The selectivity can vary between 0 and 1. The
value of this option caps the estimated SELECTIVITY. The estimated ROWCOUNT is
derived by multiplying the estimated SELECTIVITY by the estimated ROWCOUNT of
the upstream operator. The estimated ROWCOUNT displays when you use the EXPLAIN
PLAN INCLUDING ALL ATTRIBUTES FOR command. This option does not control the
estimated ROWCOUNT of downstream operators (post FILTER). However, estimated
ROWCOUNTs may change because the operator ROWCOUNTs depend on their downstream
operators. The FILTER operator relies on the input of its immediate upstream
operator, for example SCAN, AGGREGATE. If two filters are present in a plan,
each filter may have a different estimated ROWCOUNT based on the immediate
upstream operator\'s estimated ROWCOUNT."));
public static final DoubleValidator FILTER_MAX_SELECTIVITY_ESTIMATE_FACTOR =
new
MaxRangeDoubleValidator("planner.filter.max_selectivity_estimate_factor",
- 0.0, 1.0, "planner.filter.min_selectivity_estimate_factor");
+ 0.0, 1.0, "planner.filter.min_selectivity_estimate_factor",
+ new OptionDescription("Available as of Drill 1.8. Sets the maximum
filter selectivity estimate. The selectivity can vary between 0 and 1. For more
details, see planner.filter.min_selectivity_estimate_factor."));
public static final String TYPE_INFERENCE_KEY =
"planner.enable_type_inference";
- public static final BooleanValidator TYPE_INFERENCE = new
BooleanValidator(TYPE_INFERENCE_KEY);
+ public static final BooleanValidator TYPE_INFERENCE = new
BooleanValidator(TYPE_INFERENCE_KEY, null);
public static final LongValidator IN_SUBQUERY_THRESHOLD =
- new PositiveLongValidator("planner.in_subquery_threshold",
Integer.MAX_VALUE); /* Same as Calcite's default IN List subquery size */
+ new PositiveLongValidator("planner.in_subquery_threshold",
Integer.MAX_VALUE, null); /* Same as Calcite's default IN List subquery size */
public static final String PARQUET_ROWGROUP_FILTER_PUSHDOWN_PLANNING_KEY =
"planner.store.parquet.rowgroup.filter.pushdown.enabled";
- public static final BooleanValidator
PARQUET_ROWGROUP_FILTER_PUSHDOWN_PLANNING = new
BooleanValidator(PARQUET_ROWGROUP_FILTER_PUSHDOWN_PLANNING_KEY);
+ public static final BooleanValidator
PARQUET_ROWGROUP_FILTER_PUSHDOWN_PLANNING = new
BooleanValidator(PARQUET_ROWGROUP_FILTER_PUSHDOWN_PLANNING_KEY, null);
public static final String
PARQUET_ROWGROUP_FILTER_PUSHDOWN_PLANNING_THRESHOLD_KEY =
"planner.store.parquet.rowgroup.filter.pushdown.threshold";
public static final PositiveLongValidator
PARQUET_ROWGROUP_FILTER_PUSHDOWN_PLANNING_THRESHOLD = new
PositiveLongValidator(PARQUET_ROWGROUP_FILTER_PUSHDOWN_PLANNING_THRESHOLD_KEY,
- Long.MAX_VALUE);
+ Long.MAX_VALUE, null);
public static final String QUOTING_IDENTIFIERS_KEY =
"planner.parser.quoting_identifiers";
public static final EnumeratedStringValidator QUOTING_IDENTIFIERS = new
EnumeratedStringValidator(
- QUOTING_IDENTIFIERS_KEY, Quoting.BACK_TICK.string,
Quoting.DOUBLE_QUOTE.string, Quoting.BRACKET.string);
+ QUOTING_IDENTIFIERS_KEY, null, Quoting.BACK_TICK.string,
Quoting.DOUBLE_QUOTE.string, Quoting.BRACKET.string);
/*
"planner.enable_unnest_lateral" is to allow users to choose enable
unnest+lateraljoin feature.
*/
public static final String ENABLE_UNNEST_LATERAL_KEY =
"planner.enable_unnest_lateral";
- public static final BooleanValidator ENABLE_UNNEST_LATERAL = new
BooleanValidator(ENABLE_UNNEST_LATERAL_KEY);
+ public static final BooleanValidator ENABLE_UNNEST_LATERAL = new
BooleanValidator(ENABLE_UNNEST_LATERAL_KEY, null);
/*
Enables rules that re-write query joins in the most optimal way.
@@ -140,10 +171,10 @@
Note: once hash and merge joins will allow non-equi join conditions,
the need to turn off join optimization may go away.
*/
- public static final BooleanValidator JOIN_OPTIMIZATION = new
BooleanValidator("planner.enable_join_optimization");
+ public static final BooleanValidator JOIN_OPTIMIZATION = new
BooleanValidator("planner.enable_join_optimization", null);
// for testing purpose
public static final String FORCE_2PHASE_AGGR_KEY =
"planner.force_2phase_aggr";
- public static final BooleanValidator FORCE_2PHASE_AGGR = new
BooleanValidator(FORCE_2PHASE_AGGR_KEY);
+ public static final BooleanValidator FORCE_2PHASE_AGGR = new
BooleanValidator(FORCE_2PHASE_AGGR_KEY, null);
public OptionManager options = null;
public FunctionImplementationRegistry functionImplementationRegistry = null;
diff --git
a/exec/java-exec/src/main/java/org/apache/drill/exec/rpc/user/InboundImpersonationManager.java
b/exec/java-exec/src/main/java/org/apache/drill/exec/rpc/user/InboundImpersonationManager.java
index 37142204bdc..cf0afcfd6be 100644
---
a/exec/java-exec/src/main/java/org/apache/drill/exec/rpc/user/InboundImpersonationManager.java
+++
b/exec/java-exec/src/main/java/org/apache/drill/exec/rpc/user/InboundImpersonationManager.java
@@ -88,7 +88,7 @@
public static class InboundImpersonationPolicyValidator extends
StringValidator {
public InboundImpersonationPolicyValidator(String name) {
- super(name);
+ super(name, null);
}
@Override
diff --git
a/exec/java-exec/src/main/java/org/apache/drill/exec/server/options/OptionValidator.java
b/exec/java-exec/src/main/java/org/apache/drill/exec/server/options/OptionValidator.java
index f7deaee20d2..98e44402a24 100644
---
a/exec/java-exec/src/main/java/org/apache/drill/exec/server/options/OptionValidator.java
+++
b/exec/java-exec/src/main/java/org/apache/drill/exec/server/options/OptionValidator.java
@@ -27,10 +27,12 @@
// Stored here as well as in the option static class to allow insertion of
option optionName into
// the error messages produced by the validator
private final String optionName;
+ private final OptionDescription description;
/** By default, if admin option value is not specified, it would be set to
false.*/
- public OptionValidator(String optionName) {
+ public OptionValidator(String optionName, OptionDescription description) {
this.optionName = optionName;
+ this.description = description;
}
/**
@@ -42,6 +44,14 @@ public String getOptionName() {
return optionName;
}
+ /**
+ * Get the option description (long and short)
+ * @return the description
+ */
+ public OptionDescription getOptionDescription() {
+ return description;
+ }
+
/**
* This function returns true if and only if this validator is meant for a
short-lived option.
*
@@ -89,4 +99,52 @@ public int getTtl() {
public String getConfigProperty() {
return ExecConstants.bootDefaultFor(getOptionName());
}
+
+ public static class OptionDescription {
+ private String description;
+ private String shortDescription;
+
+ /**
+ * Constructor for option's description
+ * @param description verbose format
+ */
+ public OptionDescription(String description) {
+ this(description, null);
+ }
+
+ /**
+ * Constructor for option's description
+ * @param description verbose format
+ * @param shortDescription short format
+ */
+ public OptionDescription(String description, String shortDescription) {
+ this.description = description;
+ this.shortDescription = shortDescription;
+ }
+
+ /**
+ * Get verbose description
+ * @return verbose description
+ */
+ public String getDescription() {
+ return description;
+ }
+
+ /**
+ * Get short description (typically for system tables)
+ * @return short description
+ */
+ public String getShortDescription() {
+ return shortDescription;
+ }
+
+ /**
+ * Check for explicit short description
+ * @return true if a short description explicitly exists
+ */
+ public boolean hasShortDescription() {
+ return (shortDescription != null);
+ }
+
+ }
}
diff --git
a/exec/java-exec/src/main/java/org/apache/drill/exec/server/options/TypeValidators.java
b/exec/java-exec/src/main/java/org/apache/drill/exec/server/options/TypeValidators.java
index 6ffd0feaf62..106d4364a09 100644
---
a/exec/java-exec/src/main/java/org/apache/drill/exec/server/options/TypeValidators.java
+++
b/exec/java-exec/src/main/java/org/apache/drill/exec/server/options/TypeValidators.java
@@ -32,8 +32,8 @@
public static class PositiveLongValidator extends LongValidator {
protected final long max;
- public PositiveLongValidator(String name, long max) {
- super(name);
+ public PositiveLongValidator(String name, long max, OptionDescription
description) {
+ super(name, description);
this.max = max;
}
@@ -50,8 +50,8 @@ public void validate(final OptionValue v, final
OptionMetaData metaData, final O
public static class PowerOfTwoLongValidator extends PositiveLongValidator {
- public PowerOfTwoLongValidator(String name, long max) {
- super(name, max);
+ public PowerOfTwoLongValidator(String name, long max, OptionDescription
description) {
+ super(name, max, description);
}
@Override
@@ -73,8 +73,8 @@ private static boolean isPowerOfTwo(long num) {
protected final double min;
protected final double max;
- public RangeDoubleValidator(String name, double min, double max) {
- super(name);
+ public RangeDoubleValidator(String name, double min, double max,
OptionDescription description) {
+ super(name, description);
this.min = min;
this.max = max;
}
@@ -93,8 +93,8 @@ public void validate(final OptionValue v, final
OptionMetaData metaData, final O
public static class MinRangeDoubleValidator extends RangeDoubleValidator {
private final String maxValidatorName;
- public MinRangeDoubleValidator(String name, double min, double max, String
maxValidatorName) {
- super(name, min, max);
+ public MinRangeDoubleValidator(String name, double min, double max, String
maxValidatorName, OptionDescription description) {
+ super(name, min, max, description);
this.maxValidatorName = maxValidatorName;
}
@@ -114,8 +114,8 @@ public void validate(final OptionValue v, final
OptionMetaData metaData, final O
public static class MaxRangeDoubleValidator extends RangeDoubleValidator {
private final String minValidatorName;
- public MaxRangeDoubleValidator(String name, double min, double max, String
minValidatorName) {
- super(name, min, max);
+ public MaxRangeDoubleValidator(String name, double min, double max, String
minValidatorName, OptionDescription description) {
+ super(name, min, max, description);
this.minValidatorName = minValidatorName;
}
@@ -133,32 +133,32 @@ public void validate(final OptionValue v, final
OptionMetaData metaData, final O
}
public static class BooleanValidator extends TypeValidator {
- public BooleanValidator(String name) {
- super(name, Kind.BOOLEAN);
+ public BooleanValidator(String name, OptionDescription description) {
+ super(name, Kind.BOOLEAN, description);
}
}
public static class StringValidator extends TypeValidator {
- public StringValidator(String name) {
- super(name, Kind.STRING);
+ public StringValidator(String name, OptionDescription description) {
+ super(name, Kind.STRING, description);
}
}
public static class LongValidator extends TypeValidator {
- public LongValidator(String name) {
- super(name, Kind.LONG);
+ public LongValidator(String name, OptionDescription description) {
+ super(name, Kind.LONG, description);
}
}
public static class DoubleValidator extends TypeValidator {
- public DoubleValidator(String name) {
- super(name, Kind.DOUBLE);
+ public DoubleValidator(String name, OptionDescription description) {
+ super(name, Kind.DOUBLE, description);
}
}
public static class IntegerValidator extends LongValidator {
- public IntegerValidator(String name) {
- super(name);
+ public IntegerValidator(String name, OptionDescription description) {
+ super(name, description);
}
@Override
@@ -176,8 +176,8 @@ public void validate(final OptionValue v, final
OptionMetaData metaData, final O
private final long min;
private final long max;
- public RangeLongValidator(String name, long min, long max) {
- super(name);
+ public RangeLongValidator(String name, long min, long max,
OptionDescription description) {
+ super(name, description);
this.min = min;
this.max = max;
}
@@ -199,8 +199,8 @@ public void validate(final OptionValue v, final
OptionMetaData metaData, final O
public static class EnumeratedStringValidator extends StringValidator {
private final Set<String> valuesSet = Sets.newLinkedHashSet();
- public EnumeratedStringValidator(String name, String... values) {
- super(name);
+ public EnumeratedStringValidator(String name, OptionDescription
description, String... values) {
+ super(name, description);
for (String value : values) {
valuesSet.add(value.toLowerCase());
}
@@ -225,8 +225,8 @@ public void validate(final OptionValue v, final
OptionMetaData metaData, final O
public final String DEFAULT_ADMIN_USERS = "%drill_process_user%";
- public AdminUsersValidator(String name) {
- super(name);
+ public AdminUsersValidator(String name, OptionDescription description) {
+ super(name, description);
}
public String getAdminUsers(OptionManager optionManager) {
@@ -249,8 +249,8 @@ public String getAdminUsers(OptionManager optionManager) {
public final String DEFAULT_ADMIN_USER_GROUPS =
"%drill_process_user_groups%";
- public AdminUserGroupsValidator(String name) {
- super(name);
+ public AdminUserGroupsValidator(String name, OptionDescription
description) {
+ super(name, description);
}
public String getAdminUserGroups(OptionManager optionManager) {
@@ -271,8 +271,8 @@ public String getAdminUserGroups(OptionManager
optionManager) {
* the available number of processors and cpu load average
*/
public static class MaxWidthValidator extends LongValidator{
- public MaxWidthValidator(String name) {
- super(name);
+ public MaxWidthValidator(String name, OptionDescription description) {
+ super(name, description);
}
public int computeMaxWidth(double cpuLoadAverage, long maxWidth) {
@@ -292,8 +292,8 @@ public int computeMaxWidth(double cpuLoadAverage, long
maxWidth) {
public static abstract class TypeValidator extends OptionValidator {
private final Kind kind;
- public TypeValidator(final String name, final Kind kind) {
- super(name);
+ public TypeValidator(final String name, final Kind kind, final
OptionDescription description) {
+ super(name, description);
this.kind = kind;
}
@@ -312,6 +312,7 @@ public Kind getKind() {
return kind;
}
+ @Override
public String getConfigProperty() {
return ExecConstants.bootDefaultFor(getOptionName());
}
diff --git
a/exec/java-exec/src/main/java/org/apache/drill/exec/server/rest/WebServer.java
b/exec/java-exec/src/main/java/org/apache/drill/exec/server/rest/WebServer.java
index 8f88e214eee..fd9950e6c4c 100644
---
a/exec/java-exec/src/main/java/org/apache/drill/exec/server/rest/WebServer.java
+++
b/exec/java-exec/src/main/java/org/apache/drill/exec/server/rest/WebServer.java
@@ -20,7 +20,12 @@
import com.codahale.metrics.MetricRegistry;
import com.codahale.metrics.servlets.MetricsServlet;
import com.codahale.metrics.servlets.ThreadDumpServlet;
+import org.apache.drill.shaded.guava.com.google.common.collect.Lists;
+import org.apache.drill.shaded.guava.com.google.common.io.Files;
+
+import org.apache.commons.io.FileUtils;
import org.apache.commons.lang3.RandomStringUtils;
+import org.apache.commons.lang3.StringEscapeUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.drill.common.config.DrillConfig;
import org.apache.drill.common.exceptions.DrillException;
@@ -29,6 +34,10 @@
import org.apache.drill.exec.exception.DrillbitStartupException;
import org.apache.drill.exec.server.BootStrapContext;
import org.apache.drill.exec.server.Drillbit;
+import org.apache.drill.exec.server.options.OptionList;
+import org.apache.drill.exec.server.options.OptionManager;
+import org.apache.drill.exec.server.options.OptionValidator.OptionDescription;
+import org.apache.drill.exec.server.options.OptionValue;
import org.apache.drill.exec.server.rest.auth.DrillErrorHandler;
import org.apache.drill.exec.server.rest.auth.DrillHttpSecurityHandlerProvider;
import org.apache.drill.exec.ssl.SSLConfigBuilder;
@@ -68,7 +77,11 @@
import javax.servlet.http.HttpSession;
import javax.servlet.http.HttpSessionEvent;
import javax.servlet.http.HttpSessionListener;
+import java.io.BufferedWriter;
+import java.io.File;
+import java.io.FileWriter;
import java.io.IOException;
+import java.io.InputStream;
import java.math.BigInteger;
import java.net.BindException;
import java.security.KeyPair;
@@ -76,14 +89,18 @@
import java.security.KeyStore;
import java.security.SecureRandom;
import java.security.cert.X509Certificate;
+import java.util.Collections;
import java.util.Date;
import java.util.EnumSet;
+import java.util.List;
/**
* Wrapper class around jetty based webserver.
*/
public class WebServer implements AutoCloseable {
private static final org.slf4j.Logger logger =
org.slf4j.LoggerFactory.getLogger(WebServer.class);
+ private static final String OPTIONS_DESCRIBE_JS = "options.describe.js";
+ private static final String OPTIONS_DESCRIBE_TEMPLATE_JS =
"options.describe.template.js";
private static final int PORT_HUNT_TRIES = 100;
private static final String BASE_STATIC_PATH = "/rest/static/";
@@ -96,6 +113,22 @@
private Server embeddedJetty;
+ private File tmpJavaScriptDir;
+
+ public File getTmpJavaScriptDir() {
+ if (tmpJavaScriptDir == null) {
+ tmpJavaScriptDir = Files.createTempDir();
+ tmpJavaScriptDir.deleteOnExit();
+ //Perform All auto generated files at this point
+ try {
+ generateOptionsDescriptionJSFile();
+ } catch (IOException e) {
+ logger.error("Unable to create temp dir for JavaScripts. {}", e);
+ }
+ }
+ return tmpJavaScriptDir;
+ }
+
/**
* Create Jetty based web server.
*
@@ -195,6 +228,16 @@ private ServletContextHandler
createServletContextHandler(final boolean authEnab
staticHolder.setInitParameter("pathInfoOnly", "true");
servletContextHandler.addServlet(staticHolder, "/static/*");
+ //Add Local path resource (This will allow access to dynamically created
files like JavaScript)
+ final ServletHolder dynamicHolder = new ServletHolder("dynamic",
DefaultServlet.class);
+ //Skip if unable to get a temp directory (e.g. during Unit tests)
+ if (getTmpJavaScriptDir() != null) {
+ dynamicHolder.setInitParameter("resourceBase",
getTmpJavaScriptDir().getAbsolutePath());
+ dynamicHolder.setInitParameter("dirAllowed", "true");
+ dynamicHolder.setInitParameter("pathInfoOnly", "true");
+ servletContextHandler.addServlet(dynamicHolder, "/dynamic/*");
+ }
+
if (authEnabled) {
//DrillSecurityHandler is used to support SPNEGO and FORM authentication
together
servletContextHandler.setSecurityHandler(new
DrillHttpSecurityHandlerProvider(config, workManager.getContext()));
@@ -409,5 +452,47 @@ public void close() throws Exception {
if (embeddedJetty != null) {
embeddedJetty.stop();
}
+ //Deleting temp directory
+ FileUtils.deleteDirectory(getTmpJavaScriptDir());
+ }
+
+ private static final String FILE_CONTENT_FOOTER = "};";
+
+ //Generate Options Description JavaScript
+ private void generateOptionsDescriptionJSFile() throws IOException {
+ //Obtain list of Options & their descriptions
+ OptionManager optionManager =
this.drillbit.getContext().getOptionManager();
+ OptionList publicOptions = optionManager.getPublicOptionList();
+ List<OptionValue> options = Lists.newArrayList(publicOptions);
+ Collections.sort(options);
+ int numLeftToWrite = options.size();
+
+ //Template source Javascript file
+ InputStream optionsDescripTemplateStream =
Resource.newClassPathResource(OPTIONS_DESCRIBE_TEMPLATE_JS).getInputStream();
+ //Generated file
+ File optionsDescriptionFile = new File(getTmpJavaScriptDir(),
OPTIONS_DESCRIBE_JS);
+ optionsDescriptionFile.deleteOnExit();
+ //Create a copy of a template and write with that!
+ java.nio.file.Files.copy(optionsDescripTemplateStream,
optionsDescriptionFile.toPath());
+ logger.info("Will write {} descriptions to {}", numLeftToWrite,
optionsDescriptionFile.getAbsolutePath());
+
+ try (BufferedWriter writer = new BufferedWriter(new
FileWriter(optionsDescriptionFile, true))) {
+ //Iterate through options
+ for (OptionValue option : options) {
+ numLeftToWrite--;
+ String optionName = option.getName();
+ OptionDescription optionDescription =
optionManager.getOptionDefinition(optionName).getValidator().getOptionDescription();
+ if (optionDescription != null) {
+ //Note: We don't need to worry about short descriptions for WebUI,
since they will never be explicitly accessed from the map
+ writer.append(" \"").append(optionName).append("\" : \"")
+
.append(StringEscapeUtils.escapeEcmaScript(optionDescription.getDescription()))
+ .append( numLeftToWrite > 0 ? "\"," : "\"");
+ writer.newLine();
+ }
+ }
+ writer.append(FILE_CONTENT_FOOTER);
+ writer.newLine();
+ writer.flush();
+ }
}
}
diff --git
a/exec/java-exec/src/main/java/org/apache/drill/exec/store/sys/ExtendedOptionIterator.java
b/exec/java-exec/src/main/java/org/apache/drill/exec/store/sys/ExtendedOptionIterator.java
index 26df8465172..a8c3c84283a 100644
---
a/exec/java-exec/src/main/java/org/apache/drill/exec/store/sys/ExtendedOptionIterator.java
+++
b/exec/java-exec/src/main/java/org/apache/drill/exec/store/sys/ExtendedOptionIterator.java
@@ -27,6 +27,8 @@
import org.apache.drill.exec.ops.FragmentContext;
import org.apache.drill.exec.server.options.OptionManager;
import org.apache.drill.exec.server.options.OptionValue;
+import org.apache.drill.exec.server.options.OptionValidator.OptionDescription;
+import org.apache.drill.exec.server.options.OptionValue.AccessibleScopes;
import org.apache.drill.exec.server.options.OptionValue.Kind;
import org.apache.drill.exec.server.options.OptionValue.OptionScope;
import org.apache.drill.exec.store.pojo.NonNullable;
@@ -53,14 +55,26 @@
* only the value set at SESSION level.
*/
public class ExtendedOptionIterator implements Iterator<Object> {
-// private static final org.slf4j.Logger logger =
org.slf4j.LoggerFactory.getLogger(OptionIterator.class);
+ //private static final org.slf4j.Logger logger =
org.slf4j.LoggerFactory.getLogger(ExtendedOptionIterator.class);
private final OptionManager fragmentOptions;
private final Iterator<OptionValue> mergedOptions;
+ private Map<OptionValue.Kind, String> typeMapping;
+ private Map<OptionScope, Integer> preference;
+ private static final int SHORT_DESCRIP_MAX_SIZE = 110;
public ExtendedOptionIterator(FragmentContext context, boolean internal) {
fragmentOptions = context.getOptions();
- final Iterator<OptionValue> optionList;
+ preference = new HashMap<OptionScope, Integer>();
+ preference.put(OptionScope.SESSION, 0);
+ preference.put(OptionScope.SYSTEM, 1);
+ preference.put(OptionScope.BOOT, 2);
+
+ typeMapping = new HashMap<Kind, String>();
+ typeMapping.put(Kind.STRING, "VARCHAR");
+ typeMapping.put(Kind.DOUBLE, "FLOAT");
+ typeMapping.put(Kind.LONG, "BIGINT");
+ typeMapping.put(Kind.BOOLEAN, "BIT");
if (!internal) {
mergedOptions =
sortOptions(fragmentOptions.getPublicOptionList().iterator());
@@ -76,11 +90,6 @@ public ExtendedOptionIterator(FragmentContext context,
boolean internal) {
public Iterator<OptionValue> sortOptions(Iterator<OptionValue> options) {
List<OptionValue> optionslist = Lists.newArrayList(options);
HashMap<String, OptionValue> optionsmap = new HashMap<>();
- final Map<OptionScope, Integer> preference = new HashMap<OptionScope,
Integer>() {{
- put(OptionScope.SESSION, 0);
- put(OptionScope.SYSTEM, 1);
- put(OptionScope.BOOT, 2);
- }};
for (OptionValue option : optionslist) {
if (optionsmap.containsKey(option.getName())) {
@@ -116,14 +125,17 @@ public boolean hasNext() {
  /**
   * Returns the next option, wrapped with its SQL type name, status
   * (BOOT / DEFAULT / CHANGED) and short description.
   */
  @Override
  public ExtendedOptionValueWrapper next() {
    final OptionValue value = mergedOptions.next();

    // BOOT-scoped options have no runtime default to compare against; for all
    // others, the status is DEFAULT when the value equals the system default.
    final Status status;
    if (value.accessibleScopes == AccessibleScopes.BOOT) {
      status = Status.BOOT;
    } else {
      final OptionValue def = fragmentOptions.getDefault(value.name);
      status = (value.equalsIgnoreType(def) ? Status.DEFAULT : Status.CHANGED);
    }

    // typeMapping translates the option Kind to its SQL type name (e.g. STRING -> VARCHAR)
    return new ExtendedOptionValueWrapper(value.name, typeMapping.get(value.kind),
        value.accessibleScopes, value.getValue().toString(), status, value.scope,
        getShortDescription(value.name));
  }
public enum Status {
@@ -134,7 +146,6 @@ public ExtendedOptionValueWrapper next() {
* Wrapper class for Extended Option Value
*/
public static class ExtendedOptionValueWrapper {
-
@NonNullable
public final String name;
@NonNullable
@@ -142,17 +153,39 @@ public ExtendedOptionValueWrapper next() {
@NonNullable
public final OptionValue.AccessibleScopes accessibleScopes;
public final String val;
+ public final Status status;
@NonNullable
public final OptionScope optionScope;
+ public final String description;
-
- public ExtendedOptionValueWrapper(final String name, final String kind,
final OptionValue.AccessibleScopes type, final String value, final OptionScope
scope) {
+ public ExtendedOptionValueWrapper(final String name, final String kind,
final OptionValue.AccessibleScopes type, final String value, final Status
status, final OptionScope scope,
+ final String description) {
this.name = name;
this.kind = kind;
this.accessibleScopes = type;
this.val = value;
+ this.status = status;
this.optionScope = scope;
+ this.description = description;
+ }
+ }
+
+ //Extract a limited length from the original description if not available
+ private String getShortDescription(String name) {
+ OptionDescription optionDescription =
fragmentOptions.getOptionDefinition(name).getValidator().getOptionDescription();
+ if (optionDescription == null) {
+ return "";
+ }
+ String description = null;
+ if (optionDescription.hasShortDescription()) {
+ description = optionDescription.getShortDescription();
+ } else {
+ description = optionDescription.getDescription();
+ if (description.length() > SHORT_DESCRIP_MAX_SIZE) {
+ return description.substring(0,
SHORT_DESCRIP_MAX_SIZE-3).concat("..."); //Append ellipsis (trailing dots)
+ }
}
+ return description;
}
@Override
diff --git
a/exec/java-exec/src/main/java/org/apache/drill/exec/store/sys/SystemTable.java
b/exec/java-exec/src/main/java/org/apache/drill/exec/store/sys/SystemTable.java
index 8882f2db2d9..8bdfb7f5162 100644
---
a/exec/java-exec/src/main/java/org/apache/drill/exec/store/sys/SystemTable.java
+++
b/exec/java-exec/src/main/java/org/apache/drill/exec/store/sys/SystemTable.java
@@ -140,6 +140,4 @@ public boolean isDistributed() {
public Class<?> getPojoClass() {
return pojoClass;
}
-
-
}
diff --git
a/exec/java-exec/src/main/java/org/apache/drill/exec/testing/ExecutionControls.java
b/exec/java-exec/src/main/java/org/apache/drill/exec/testing/ExecutionControls.java
index b8ad9b6f6ab..29a3a2aa5b4 100644
---
a/exec/java-exec/src/main/java/org/apache/drill/exec/testing/ExecutionControls.java
+++
b/exec/java-exec/src/main/java/org/apache/drill/exec/testing/ExecutionControls.java
@@ -78,9 +78,10 @@
  /**
   * Constructor for controls option validator.
   * @param name the name of the validator
   * @param ttl the number of queries for which this option should be valid
   * @param description Description of the option
   */
  public ControlsOptionValidator(final String name, final int ttl, OptionDescription description) {
    super(name, OptionValue.Kind.STRING, description);
    // ttl must be positive; checked only when JVM assertions are enabled
    assert ttl > 0;
    this.ttl = ttl;
  }
diff --git a/exec/java-exec/src/main/resources/options.describe.template.js
b/exec/java-exec/src/main/resources/options.describe.template.js
new file mode 100644
index 00000000000..a5ffb2bfda5
--- /dev/null
+++ b/exec/java-exec/src/main/resources/options.describe.template.js
@@ -0,0 +1,23 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
contributor
+ * license agreements. See the NOTICE file distributed with this work for
additional
+ * information regarding copyright ownership. The ASF licenses this file to
+ * You under the Apache License, Version 2.0 (the "License"); you may not use
+ * this file except in compliance with the License. You may obtain a copy of
+ * the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required
+ * by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
CONDITIONS
+ * OF ANY KIND, either express or implied. See the License for the specific
+ * language governing permissions and limitations under the License.
+ */
+
//Returns the description registered for the given option name, or "" when unknown
function getDescription(optionName) {
  var description = optsDescripMap[optionName];
  return (optionName in optsDescripMap) ? description : "";
}
+
+//List of options and their descriptions (short entries are limited to 110
chars)
+var optsDescripMap = {
diff --git a/exec/java-exec/src/main/resources/rest/options.ftl
b/exec/java-exec/src/main/resources/rest/options.ftl
index 7a1deac0da4..9d6a684eb44 100644
--- a/exec/java-exec/src/main/resources/rest/options.ftl
+++ b/exec/java-exec/src/main/resources/rest/options.ftl
@@ -19,34 +19,126 @@
-->
<#include "*/generic.ftl">
<#macro page_head>
+ <script type="text/javascript" language="javascript"
src="/static/js/jquery.dataTables-1.10.16.min.js"> </script>
+ <script type="text/javascript" language="javascript"
src="/static/js/dataTables.colVis-1.1.0.min.js"></script>
+ <!-- List of Option Descriptions -->
+ <script src="/dynamic/options.describe.js"></script>
+ <link href="/static/css/dataTables.colVis-1.1.0.min.css" rel="stylesheet">
+ <link href="/static/css/dataTables.jqueryui.css" rel="stylesheet">
+ <link href="/static/css/jquery-ui-1.10.3.min.css" rel="stylesheet">
+<style>
+/* DataTables Sorting: inherited via sortable class */
+table.sortable thead .sorting,.sorting_asc,.sorting_desc {
+ background-repeat: no-repeat;
+ background-position: center right;
+ cursor: pointer;
+}
+/* Sorting Symbols */
+table.sortable thead .sorting { background-image:
url("/static/img/black-unsorted.gif"); }
+table.sortable thead .sorting_asc { background-image:
url("/static/img/black-asc.gif"); }
+table.sortable thead .sorting_desc { background-image:
url("/static/img/black-desc.gif"); }
+</style>
</#macro>
<#macro page_body>
<a href="/queries">back</a><br/>
<div class="page-header">
</div>
- <h4>System options</h4>
+ <div class="btn-group btn-group-sm" style="display:inline-block;">
+ <button type="button" class="btn" style="cursor:default;font-weight:bold;" >
Quick Filters </button>
+ <button type="button" class="btn btn-info"
onclick="inject(this.innerHTML);">planner</button>
+ <button type="button" class="btn btn-info"
onclick="inject(this.innerHTML);">store</button>
+ <button type="button" class="btn btn-info"
onclick="inject(this.innerHTML);">parquet</button>
+ <button type="button" class="btn btn-info"
onclick="inject(this.innerHTML);">hashagg</button>
+ <button type="button" class="btn btn-info"
onclick="inject(this.innerHTML);">hashjoin</button>
+ </div>
+ <div class="col-xs-4">
+ <input id="searchBox" name="searchBox" class="form-control" type="text"
value="" placeholder="Search options...">
+ </div>
+
<div class="table-responsive">
- <table class="table table-hover">
+ <table id='optionsTbl' class="table table-striped table-condensed display
sortable" style="table-layout: auto; width=100%;">
+ <thead>
+ <tr>
+ <th style="width:30%">OPTION</th>
+ <th style="width:25%">VALUE</th>
+ <th style="width:45%">DESCRIPTION</th>
+ </tr>
+ </thead>
<tbody>
+ <#assign i = 1>
<#list model as option>
- <tr>
- <td style="border:none;">${option.getName()}</td>
- <td style="border:none;">
+ <tr id="row-${i}">
+ <td style="font-family:Courier New; vertical-align:middle"
id='optionName'>${option.getName()}</td>
+ <td>
<form class="form-inline" role="form"
action="/option/${option.getName()}" method="POST">
<div class="form-group">
- <input type="text" class="form-control" name="value"
value="${option.getValueAsString()}">
- <input type="hidden" class="form-control" name="kind"
value="${option.getKind()}">
- <input type="hidden" class="form-control" name="name"
value="${option.getName()}">
+ <input type="hidden" class="form-control" name="kind"
value="${option.getKind()}">
+ <input type="hidden" class="form-control" name="name"
value="${option.getName()}">
+ <div class="input-group input-sm">
+ <#if option.getKind() == "BOOLEAN" >
+ <select class="form-control" name="value">
+ <option value="false" ${(option.getValueAsString() ==
"false")?string("selected", "")}>false</option>
+ <option value="true" ${(option.getValueAsString() ==
"true")?string("selected", "")}>true</option>
+ </select>
+ <#else>
+ <input type="text" class="form-control"
placeholder="${option.getValueAsString()}" name="value"
value="${option.getValueAsString()}">
+ </#if>
+ <div class="input-group-btn">
+ <button class="btn btn-default"
type="submit">Update</button>
+ </div>
+ </div>
</div>
- <button type="submit" class="btn btn-default">Update</button>
</form>
</td>
+ <td id='description'></td>
</tr>
+ <#assign i = i + 1>
</#list>
</tbody>
</table>
</div>
-</#macro>
+ <script>
+ //Defining the DataTable with a handle
+ var optTable = $('#optionsTbl').DataTable( {
+ "lengthChange": false,
+ "pageLength": -1,
+ "dom": 'lrit',
+ "jQueryUI" : true,
+ "searching": true,
+ "language": {
+ "lengthMenu": "Display _MENU_ records per page",
+ "zeroRecords": "No matching options found. Check search entry",
+ "info": "Found _END_ matches out of _MAX_ options",
+ "infoEmpty": "No options available",
+ "infoFiltered": ""
+ }
+ } );
+ //Draw when the table is ready
+ $(document).ready(function() {
+ //Inject Descriptions for table
+ let size = $('#optionsTbl tbody tr').length;
+ for (i = 1; i <= size; i++) {
+ let currRow = $("#row-"+i);
+ let optionName = currRow.find("#optionName").text();
+ let setOptDescrip =
currRow.find("#description").text(getDescription(optionName));
+ }
+
+ // Draw DataTable
+ optTable.rows().invalidate().draw();
+ });
+
+ //EventListener to update table when changes are detected
+ $('#searchBox').on('keyup change', function () {
+ optTable.search(this.value).draw().toString();
+ });
+
+ //Inject word and force table redraw
+ function inject(searchTerm) {
+ $('#searchBox').val(searchTerm);
+ optTable.search(searchTerm).draw().toString();
+ }
+ </script>
+</#macro>
<@page_html/>
diff --git
a/exec/java-exec/src/test/java/org/apache/drill/exec/server/options/TestConfigLinkage.java
b/exec/java-exec/src/test/java/org/apache/drill/exec/server/options/TestConfigLinkage.java
index 5b9ae97c30d..8fec57680bf 100644
---
a/exec/java-exec/src/test/java/org/apache/drill/exec/server/options/TestConfigLinkage.java
+++
b/exec/java-exec/src/test/java/org/apache/drill/exec/server/options/TestConfigLinkage.java
@@ -48,7 +48,7 @@
public BaseDirTestWatcher dirTestWatcher = new BaseDirTestWatcher();
public static OptionDefinition createMockPropOptionDefinition() {
- return new OptionDefinition(new
TypeValidators.StringValidator(MOCK_PROPERTY), new
OptionMetaData(OptionValue.AccessibleScopes.ALL, false, true));
+ return new OptionDefinition(new
TypeValidators.StringValidator(MOCK_PROPERTY, null), new
OptionMetaData(OptionValue.AccessibleScopes.ALL, false, true));
}
@Test
diff --git
a/exec/java-exec/src/test/java/org/apache/drill/exec/work/metadata/TestMetadataProvider.java
b/exec/java-exec/src/test/java/org/apache/drill/exec/work/metadata/TestMetadataProvider.java
index c5b607b1f4e..e6b1416be00 100644
---
a/exec/java-exec/src/test/java/org/apache/drill/exec/work/metadata/TestMetadataProvider.java
+++
b/exec/java-exec/src/test/java/org/apache/drill/exec/work/metadata/TestMetadataProvider.java
@@ -250,7 +250,7 @@ public void columns() throws Exception {
assertEquals(RequestStatus.OK, resp.getStatus());
List<ColumnMetadata> columns = resp.getColumnsList();
- assertEquals(130, columns.size());
+ assertEquals(134, columns.size());
// too many records to verify the output.
}
----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on GitHub and use the
URL above to go to the specific comment.
For queries about this service, please contact Infrastructure at:
[email protected]
> UI options grouping and filtering & Metrics hints
> -------------------------------------------------
>
> Key: DRILL-5735
> URL: https://issues.apache.org/jira/browse/DRILL-5735
> Project: Apache Drill
> Issue Type: Improvement
> Components: Web Server
> Affects Versions: 1.9.0, 1.10.0, 1.11.0
> Reporter: Muhammad Gelbana
> Assignee: Kunal Khatua
> Priority: Major
> Labels: doc-impacting, ready-to-commit
> Fix For: 1.15.0
>
>
> I'm thinking of some UI improvements that could make all the difference for
> users trying to optimize low-performing queries.
> h2. Options
> h3. Grouping
> We can organize the options to be grouped by their scope of effect, this will
> help users easily locate the options they may need to tune.
> h3. Filtering
> Since the options are a lot, we can add a filtering mechanism (i.e. string
> search or group\scope filtering) so the user can filter out the options he's
> not interested in. To provide more benefit than the grouping idea mentioned
> above, filtering may include keywords also and not just the option name,
> since the user may not be aware of the name of the option he's looking for.
> h2. Metrics
> I'm referring here to the metrics page and the query execution plan page that
> displays the overview section and major\minor fragments metrics. We can show
> hints for each metric such as:
> # What does it represent in more details.
> # What option\scope-of-options to tune (increase ? decrease ?) to improve the
> performance reported by this metric.
> # May be even provide a small dialog to quickly allow the modification of the
> related option(s) to that metric
--
This message was sent by Atlassian JIRA
(v7.6.3#76005)