[ https://issues.apache.org/jira/browse/DRILL-4699?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=15557175#comment-15557175 ]
ASF GitHub Bot commented on DRILL-4699:
---------------------------------------
Github user paul-rogers commented on a diff in the pull request:
https://github.com/apache/drill/pull/536#discussion_r82495374
--- Diff: exec/java-exec/src/main/java/org/apache/drill/exec/ExecConstants.java ---
@@ -146,89 +158,121 @@
    * |- bar - a.parquet
    * |- baz - b.parquet
    */
-  String FILESYSTEM_PARTITION_COLUMN_LABEL = "drill.exec.storage.file.partition.column.label";
-  OptionValidator FILESYSTEM_PARTITION_COLUMN_LABEL_VALIDATOR = new StringValidator(FILESYSTEM_PARTITION_COLUMN_LABEL, "dir");
+  public static final String FILESYSTEM_PARTITION_COLUMN_LABEL = "drill.exec.storage.file.partition.column.label";
+  public static final OptionValidator FILESYSTEM_PARTITION_COLUMN_LABEL_VALIDATOR = new StringValidator(
+      FILESYSTEM_PARTITION_COLUMN_LABEL, "dir",
+      "The column label for directory levels in results of queries of files in a directory. Accepts a string input.");
   /**
    * Implicit file columns
    */
-  String IMPLICIT_FILENAME_COLUMN_LABEL = "drill.exec.storage.implicit.filename.column.label";
-  OptionValidator IMPLICIT_FILENAME_COLUMN_LABEL_VALIDATOR = new StringValidator(IMPLICIT_FILENAME_COLUMN_LABEL, "filename");
-  String IMPLICIT_SUFFIX_COLUMN_LABEL = "drill.exec.storage.implicit.suffix.column.label";
-  OptionValidator IMPLICIT_SUFFIX_COLUMN_LABEL_VALIDATOR = new StringValidator(IMPLICIT_SUFFIX_COLUMN_LABEL, "suffix");
-  String IMPLICIT_FQN_COLUMN_LABEL = "drill.exec.storage.implicit.fqn.column.label";
-  OptionValidator IMPLICIT_FQN_COLUMN_LABEL_VALIDATOR = new StringValidator(IMPLICIT_FQN_COLUMN_LABEL, "fqn");
-  String IMPLICIT_FILEPATH_COLUMN_LABEL = "drill.exec.storage.implicit.filepath.column.label";
-  OptionValidator IMPLICIT_FILEPATH_COLUMN_LABEL_VALIDATOR = new StringValidator(IMPLICIT_FILEPATH_COLUMN_LABEL, "filepath");
-
-  String JSON_READ_NUMBERS_AS_DOUBLE = "store.json.read_numbers_as_double";
-  BooleanValidator JSON_READ_NUMBERS_AS_DOUBLE_VALIDATOR = new BooleanValidator(JSON_READ_NUMBERS_AS_DOUBLE, false);
-
-  String MONGO_ALL_TEXT_MODE = "store.mongo.all_text_mode";
-  OptionValidator MONGO_READER_ALL_TEXT_MODE_VALIDATOR = new BooleanValidator(MONGO_ALL_TEXT_MODE, false);
-  String MONGO_READER_READ_NUMBERS_AS_DOUBLE = "store.mongo.read_numbers_as_double";
-  OptionValidator MONGO_READER_READ_NUMBERS_AS_DOUBLE_VALIDATOR = new BooleanValidator(MONGO_READER_READ_NUMBERS_AS_DOUBLE, false);
-  String MONGO_BSON_RECORD_READER = "store.mongo.bson.record.reader";
-  OptionValidator MONGO_BSON_RECORD_READER_VALIDATOR = new BooleanValidator(MONGO_BSON_RECORD_READER, true);
-
-  BooleanValidator ENABLE_UNION_TYPE = new BooleanValidator("exec.enable_union_type", false);
+  public static final String IMPLICIT_FILENAME_COLUMN_LABEL = "drill.exec.storage.implicit.filename.column.label";
+  public static final OptionValidator IMPLICIT_FILENAME_COLUMN_LABEL_VALIDATOR = new StringValidator(
+      IMPLICIT_FILENAME_COLUMN_LABEL, "filename");
+  public static final String IMPLICIT_SUFFIX_COLUMN_LABEL = "drill.exec.storage.implicit.suffix.column.label";
+  public static final OptionValidator IMPLICIT_SUFFIX_COLUMN_LABEL_VALIDATOR = new StringValidator(
+      IMPLICIT_SUFFIX_COLUMN_LABEL, "suffix");
+  public static final String IMPLICIT_FQN_COLUMN_LABEL = "drill.exec.storage.implicit.fqn.column.label";
+  public static final OptionValidator IMPLICIT_FQN_COLUMN_LABEL_VALIDATOR = new StringValidator(
+      IMPLICIT_FQN_COLUMN_LABEL, "fqn");
+  public static final String IMPLICIT_FILEPATH_COLUMN_LABEL = "drill.exec.storage.implicit.filepath.column.label";
+  public static final OptionValidator IMPLICIT_FILEPATH_COLUMN_LABEL_VALIDATOR = new StringValidator(
+      IMPLICIT_FILEPATH_COLUMN_LABEL, "filepath");
+
+  public static final String JSON_READ_NUMBERS_AS_DOUBLE = "store.json.read_numbers_as_double";
+  public static final BooleanValidator JSON_READ_NUMBERS_AS_DOUBLE_VALIDATOR = new BooleanValidator(
+      JSON_READ_NUMBERS_AS_DOUBLE, false,
+      "Reads numbers with or without a decimal point as DOUBLE. Prevents schema change errors.");
+
+  public static final String MONGO_ALL_TEXT_MODE = "store.mongo.all_text_mode";
+  public static final OptionValidator MONGO_READER_ALL_TEXT_MODE_VALIDATOR = new BooleanValidator(MONGO_ALL_TEXT_MODE,
+      false);
+  public static final String MONGO_READER_READ_NUMBERS_AS_DOUBLE = "store.mongo.read_numbers_as_double";
+  public static final OptionValidator MONGO_READER_READ_NUMBERS_AS_DOUBLE_VALIDATOR = new BooleanValidator(
+      MONGO_READER_READ_NUMBERS_AS_DOUBLE, false);
+  public static final String MONGO_BSON_RECORD_READER = "store.mongo.bson.record.reader";
+  public static final OptionValidator MONGO_BSON_RECORD_READER_VALIDATOR = new BooleanValidator(
+      MONGO_BSON_RECORD_READER, true);
+
+  public static final BooleanValidator ENABLE_UNION_TYPE = new BooleanValidator("exec.enable_union_type", false);
   // TODO: We need to add a feature that enables storage plugins to add their own options. Currently we have to declare
   // them in core, which is not right. Move this option and the two mongo plugin options above once we have the feature.
-  String HIVE_OPTIMIZE_SCAN_WITH_NATIVE_READERS = "store.hive.optimize_scan_with_native_readers";
-  OptionValidator HIVE_OPTIMIZE_SCAN_WITH_NATIVE_READERS_VALIDATOR =
+  public static final String HIVE_OPTIMIZE_SCAN_WITH_NATIVE_READERS = "store.hive.optimize_scan_with_native_readers";
+  public static final OptionValidator HIVE_OPTIMIZE_SCAN_WITH_NATIVE_READERS_VALIDATOR =
       new BooleanValidator(HIVE_OPTIMIZE_SCAN_WITH_NATIVE_READERS, false);
-  String SLICE_TARGET = "planner.slice_target";
-  long SLICE_TARGET_DEFAULT = 100000l;
-  PositiveLongValidator SLICE_TARGET_OPTION = new PositiveLongValidator(SLICE_TARGET, Long.MAX_VALUE,
-      SLICE_TARGET_DEFAULT);
+  public static final String SLICE_TARGET = "planner.slice_target";
+  public static final String SLICE_TARGET_DESCRIPTION =
+      "The number of records manipulated within a fragment before Drill parallelizes operations.";
+  public static final long SLICE_TARGET_DEFAULT = 100000L;
+  public static final PositiveLongValidator SLICE_TARGET_OPTION = new PositiveLongValidator(SLICE_TARGET,
+      Long.MAX_VALUE, SLICE_TARGET_DEFAULT, SLICE_TARGET_DESCRIPTION);
-  String CAST_TO_NULLABLE_NUMERIC = "drill.exec.functions.cast_empty_string_to_null";
-  OptionValidator CAST_TO_NULLABLE_NUMERIC_OPTION = new BooleanValidator(CAST_TO_NULLABLE_NUMERIC, false);
+  public static final String CAST_TO_NULLABLE_NUMERIC = "drill.exec.functions.cast_empty_string_to_null";
+  public static final OptionValidator CAST_TO_NULLABLE_NUMERIC_OPTION = new BooleanValidator(CAST_TO_NULLABLE_NUMERIC,
+      false, "In a text file, treat empty fields as NULL values instead of empty strings.");
   /**
    * HashTable runtime settings
    */
-  String MIN_HASH_TABLE_SIZE_KEY = "exec.min_hash_table_size";
-  PositiveLongValidator MIN_HASH_TABLE_SIZE = new PositiveLongValidator(MIN_HASH_TABLE_SIZE_KEY, HashTable.MAXIMUM_CAPACITY, HashTable.DEFAULT_INITIAL_CAPACITY);
-  String MAX_HASH_TABLE_SIZE_KEY = "exec.max_hash_table_size";
-  PositiveLongValidator MAX_HASH_TABLE_SIZE = new PositiveLongValidator(MAX_HASH_TABLE_SIZE_KEY, HashTable.MAXIMUM_CAPACITY, HashTable.MAXIMUM_CAPACITY);
+  public static final String MIN_HASH_TABLE_SIZE_KEY = "exec.min_hash_table_size";
+  public static final PositiveLongValidator MIN_HASH_TABLE_SIZE = new PositiveLongValidator(MIN_HASH_TABLE_SIZE_KEY,
+      HashTable.MAXIMUM_CAPACITY, HashTable.DEFAULT_INITIAL_CAPACITY,
+      "Starting size in buckets for hash tables. Increase according to available memory to improve performance." +
+          " Increase for very large aggregations or joins when you have large amounts of memory for Drill to" +
+          " use. Range: 0 - " + HashTable.MAXIMUM_CAPACITY);
+  public static final String MAX_HASH_TABLE_SIZE_KEY = "exec.max_hash_table_size";
+  public static final PositiveLongValidator MAX_HASH_TABLE_SIZE = new PositiveLongValidator(MAX_HASH_TABLE_SIZE_KEY,
+      HashTable.MAXIMUM_CAPACITY, HashTable.MAXIMUM_CAPACITY,
+      "Ending size in buckets for hash tables. Range: 0 - " + HashTable.MAXIMUM_CAPACITY);
   /**
    * Limits the maximum level of parallelization to this factor times the number of Drillbits
    */
-  String MAX_WIDTH_PER_NODE_KEY = "planner.width.max_per_node";
-  OptionValidator MAX_WIDTH_PER_NODE = new PositiveLongValidator(MAX_WIDTH_PER_NODE_KEY, Integer.MAX_VALUE, (long) Math.ceil(Runtime.getRuntime().availableProcessors() * 0.70));
+  public static final String MAX_WIDTH_PER_NODE_KEY = "planner.width.max_per_node";
+  public static final OptionValidator MAX_WIDTH_PER_NODE = new PositiveLongValidator(MAX_WIDTH_PER_NODE_KEY,
+      Integer.MAX_VALUE, (long) Math.ceil(Runtime.getRuntime().availableProcessors() * 0.70),
+      "Maximum number of threads that can run in parallel for a query on a node. A slice is an individual thread. " +
+          "This number indicates the maximum number of slices per query for the query's major fragment on a node.");
   /**
    * The maximum level of parallelization any stage of the query can do. Note that while this
    * might be the number of active Drillbits, realistically, this could be well beyond that
    * number if we want to do things like speed results return.
    */
-  String MAX_WIDTH_GLOBAL_KEY = "planner.width.max_per_query";
-  OptionValidator MAX_WIDTH_GLOBAL = new PositiveLongValidator(MAX_WIDTH_GLOBAL_KEY, Integer.MAX_VALUE, 1000);
+  public static final String MAX_WIDTH_GLOBAL_KEY = "planner.width.max_per_query";
+  public static final OptionValidator MAX_WIDTH_GLOBAL = new PositiveLongValidator(MAX_WIDTH_GLOBAL_KEY,
+      Integer.MAX_VALUE, 1000,
+      "Same as max per node but applies to the query as executed by the entire cluster. For example, this value" +
+          " might be the number of active Drillbits, or a higher number to return results faster.");
   /**
    * Factor by which a node with endpoint affinity will be favored while creating assignment
    */
-  String AFFINITY_FACTOR_KEY = "planner.affinity_factor";
-  OptionValidator AFFINITY_FACTOR = new DoubleValidator(AFFINITY_FACTOR_KEY, 1.2d);
+  public static final String AFFINITY_FACTOR_KEY = "planner.affinity_factor";
+  public static final OptionValidator AFFINITY_FACTOR = new DoubleValidator(AFFINITY_FACTOR_KEY, 1.2d,
+      "Factor by which a node with endpoint affinity is favored while creating assignment." +
--- End diff --
Increase the value to... (favor locality? something else?)
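For context, the pattern this diff applies throughout is to thread an optional human-readable description through each validator constructor so the text can later surface in sys.options. A minimal sketch of that shape (class and method names here are illustrative only, not Drill's actual OptionValidator hierarchy):

    // Illustrative sketch; mirrors the constructor pattern visible in the diff,
    // not Drill's real validator classes.
    abstract class SketchValidator {
      private final String name;
      private final String description;

      SketchValidator(String name, String description) {
        this.name = name;
        // Assumption: an omitted description falls back to empty text rather than null.
        this.description = (description == null) ? "" : description;
      }

      String getName() { return name; }

      // Surfaced later as the proposed description column of sys.options.
      String getDescription() { return description; }
    }

    class SketchStringValidator extends SketchValidator {
      private final String defaultValue;

      // Old two-argument form: no description.
      SketchStringValidator(String name, String def) {
        this(name, def, null);
      }

      // New three-argument form used throughout the diff.
      SketchStringValidator(String name, String def, String description) {
        super(name, description);
        this.defaultValue = def;
      }

      String getDefault() { return defaultValue; }
    }

Declared this way, a constant such as FILESYSTEM_PARTITION_COLUMN_LABEL_VALIDATOR carries its documentation alongside its name and default value, which is exactly what a description column in sys.options needs.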
> Add Description Column in sys.options
> -------------------------------------
>
> Key: DRILL-4699
> URL: https://issues.apache.org/jira/browse/DRILL-4699
> Project: Apache Drill
> Issue Type: Improvement
> Components: Server, Documentation
> Affects Versions: 1.6.0
> Reporter: John Omernik
> Assignee: Paul Rogers
>
> select * from sys.options provides a user with a strong understanding of what
> options are available to Drill. These options are not well documented. Some
> options are "experimental"; others have a function only in specific cases
> (writers vs. readers, for example). If we had a large text field for
> description, we could enforce documentation of the settings at option
> creation time, and the description of a setting could change as versions
> change (i.e. when an option graduates from experimental to supported, the
> description would be updated in the version the user is running). So when
> they run select * from sys.options, they know the exact state of the option
> every time they query. It could also facilitate better self-documentation via
> QA on pull requests ("Did you update sys.options.desc?"). This makes Drill
> easier for users and admins to use in an enterprise.
> The first step is adding the field; going back and filling in the description
> for each option comes later (another JIRA once the column is available).
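Once the column lands, picking up the new descriptions is one query away. A minimal sketch over JDBC (assumptions: the Drill JDBC driver is on the classpath, a Drillbit runs in local/embedded mode, and the new column ends up named "description"; the final column name is decided by this JIRA, not here):

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;
    import java.sql.Statement;

    public class SysOptionsDescriptions {
      public static void main(String[] args) throws Exception {
        // "zk=local" targets a Drillbit running in embedded mode.
        try (Connection conn = DriverManager.getConnection("jdbc:drill:zk=local");
             Statement stmt = conn.createStatement();
             // Hypothetical column name "description"; adjust once DRILL-4699 lands.
             ResultSet rs = stmt.executeQuery(
                 "SELECT name, description FROM sys.options ORDER BY name")) {
          while (rs.next()) {
            System.out.printf("%-55s %s%n",
                rs.getString("name"), rs.getString("description"));
          }
        }
      }
    }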