This is an automated email from the ASF dual-hosted git repository.
qiangcai pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/carbondata.git
The following commit(s) were added to refs/heads/master by this push:
new d71074b [CARBONDATA-3791] Updated documentation for dynamic configuration params
d71074b is described below
commit d71074b50889218fc8502b3816350908b6d99bf8
Author: Venu Reddy <[email protected]>
AuthorDate: Sat May 9 02:39:51 2020 +0530
[CARBONDATA-3791] Updated documentation for dynamic configuration params
Why is this PR needed?
Dynamic configuration params were missing from the documentation.
What changes were proposed in this PR?
Updated documentation for dynamic configuration params and added test cases
This closes #3756
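As background (not part of the patch): each of these dynamic parameters can be overridden for the current session with SET and reverted with RESET, mirroring the new test case in this commit. A minimal sketch with illustrative values only (the table default.source is the test fixture used below, not a recommendation):

    -- example values only; mirrors SetParameterTestCase
    SET carbon.number.of.cores.while.loading=8;
    SET carbon.enable.auto.load.merge=true;
    -- query only segments 0 and 1 of the test table default.source
    SET carbon.input.segments.default.source=0,1;
    -- restore all session-level overrides to their defaults
    RESET;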
---
.../core/constants/CarbonCommonConstants.java | 2 +-
docs/configuration-parameters.md | 15 +++++++++++++++
.../sdv/generated/SetParameterTestCase.scala | 21 +++++++++++++++++++++
3 files changed, 37 insertions(+), 1 deletion(-)
diff --git a/core/src/main/java/org/apache/carbondata/core/constants/CarbonCommonConstants.java b/core/src/main/java/org/apache/carbondata/core/constants/CarbonCommonConstants.java
index 43965a2..4e24618 100644
--- a/core/src/main/java/org/apache/carbondata/core/constants/CarbonCommonConstants.java
+++ b/core/src/main/java/org/apache/carbondata/core/constants/CarbonCommonConstants.java
@@ -1304,7 +1304,7 @@ public final class CarbonCommonConstants {
* if process crashed when overwriting the table status file.
* To protect from file corruption, user can enable this property.
*/
- @CarbonProperty(dynamicConfigurable = true)
+ @CarbonProperty
public static final String ENABLE_TABLE_STATUS_BACKUP =
"carbon.enable.tablestatus.backup";
public static final String ENABLE_TABLE_STATUS_BACKUP_DEFAULT = "false";
diff --git a/docs/configuration-parameters.md b/docs/configuration-parameters.md
index 3d81321..1e4e9f3 100644
--- a/docs/configuration-parameters.md
+++ b/docs/configuration-parameters.md
@@ -210,6 +210,21 @@ RESET
| carbon.table.load.sort.scope.<db_name>.<table_name> | Overrides the SORT_SCOPE provided in CREATE TABLE. |
| carbon.options.global.sort.partitions | Specifies the number of partitions to be used during global sort. |
| carbon.options.serialization.null.format | Default Null value representation in the data being loaded. **NOTE:** Refer to [Data Loading Configuration](#data-loading-configuration)#carbon.options.serialization.null.format for detailed information. |
+| carbon.number.of.cores.while.loading | Specifies the number of cores to be used while loading data. **NOTE:** Refer to [Data Loading Configuration](#data-loading-configuration)#carbon.number.of.cores.while.loading for detailed information. |
+| carbon.number.of.cores.while.compacting | Specifies the number of cores to be used while compacting data. **NOTE:** Refer to [Data Loading Configuration](#data-loading-configuration)#carbon.number.of.cores.while.compacting for detailed information. |
+| enable.offheap.sort | To enable off-heap memory usage. **NOTE:** Refer to [Data Loading Configuration](#data-loading-configuration)#enable.offheap.sort for detailed information. |
+| carbon.blockletgroup.size.in.mb | Specifies the size of each blocklet group. **NOTE:** Refer to [Data Loading Configuration](#data-loading-configuration)#carbon.blockletgroup.size.in.mb for detailed information. |
+| carbon.enable.auto.load.merge | To enable compaction along with data loading. **NOTE:** Refer to [Compaction Configuration](#compaction-configuration)#carbon.enable.auto.load.merge for detailed information. |
+| carbon.major.compaction.size | To configure the major compaction size. **NOTE:** Refer to [Compaction Configuration](#compaction-configuration)#carbon.major.compaction.size for detailed information. |
+| carbon.compaction.level.threshold | To configure the compaction threshold. **NOTE:** Refer to [Compaction Configuration](#compaction-configuration)#carbon.compaction.level.threshold for detailed information. |
+| carbon.enable.vector.reader | To enable fetching data as a columnar batch of 4*1024 rows instead of fetching one row at a time. **NOTE:** Refer to [Query Configuration](#query-configuration)#carbon.enable.vector.reader for detailed information. |
+| enable.unsafe.in.query.processing | To enable the use of unsafe functions while scanning data during queries. **NOTE:** Refer to [Query Configuration](#query-configuration)#enable.unsafe.in.query.processing for detailed information. |
+| carbon.push.rowfilters.for.vector | To enable complete row filter handling by carbon in the case of vector reads. **NOTE:** Refer to [Query Configuration](#query-configuration)#carbon.push.rowfilters.for.vector for detailed information. |
+| carbon.query.stage.input.enable | To enable queries to include staged input files. **NOTE:** Refer to [Query Configuration](#query-configuration)#carbon.query.stage.input.enable for detailed information. |
+| carbon.input.segments.<db_name>.<table_name> | To specify the segment ids to query from the table. Segment ids are separated by commas. |
+| carbon.index.visible.<db_name>.<table_name>.<index_name> | To specify that queries on ***db_name.table_name*** should not use the index ***index_name***. |
+| carbon.load.indexes.parallel.<db_name>.<table_name> | To enable parallel index loading for a table. When db_name.table_name is not specified, i.e., when ***carbon.load.indexes.parallel.*** is set, it applies to all the tables in the session. |
+| carbon.enable.index.server | To use the index server for caching and pruning. This property can be set for a session or for a particular table with ***carbon.enable.index.server.<db_name>.<table_name>***. |
**Examples:**
diff --git a/integration/spark-common-cluster-test/src/test/scala/org/apache/carbondata/cluster/sdv/generated/SetParameterTestCase.scala b/integration/spark-common-cluster-test/src/test/scala/org/apache/carbondata/cluster/sdv/generated/SetParameterTestCase.scala
index 8caf80f..d71dca2 100644
--- a/integration/spark-common-cluster-test/src/test/scala/org/apache/carbondata/cluster/sdv/generated/SetParameterTestCase.scala
+++ b/integration/spark-common-cluster-test/src/test/scala/org/apache/carbondata/cluster/sdv/generated/SetParameterTestCase.scala
@@ -231,6 +231,27 @@ class SetParameterTestCase extends QueryTest with BeforeAndAfterAll {
assert(getLogFileCount("default", "carbon_table_load", "0") >= 1)
}
+ test("TC_013-test set dynamic properties") {
+ sql("SET carbon.number.of.cores.while.loading=8")
+ sql("SET carbon.number.of.cores.while.compacting=8")
+ sql("SET enable.offheap.sort=true")
+ sql("SET carbon.blockletgroup.size.in.mb=1")
+ sql("SET carbon.enable.auto.load.merge=true")
+ sql("SET carbon.major.compaction.size=1")
+ sql("SET carbon.compaction.level.threshold=3,2")
+ sql("SET carbon.enable.vector.reader=true")
+ sql("SET enable.unsafe.in.query.processing=true")
+ sql("SET carbon.push.rowfilters.for.vector=true")
+ sql("SET carbon.query.stage.input.enable=true")
+ sql("SET carbon.input.segments.default.source=*")
+ sql("SET carbon.input.segments.default.source=0,1")
+ sql("SET carbon.index.visible.default.source.bloom=true")
+ sql("SET carbon.load.indexes.parallel.default.source=true")
+ sql("SET carbon.enable.index.server=false")
+ sql("SET carbon.enable.index.server.default.source=false")
+ sql("RESET")
+ }
+
  private def getLogFileCount(dbName: String, tableName: String, segment: String): Int = {
    var path = resourcesPath + "/" + dbName + "/" + tableName + "/" + segment + "/" + segment
    val carbonFiles = FileFactory.getCarbonFile(path).listFiles(new CarbonFileFilter {