http://git-wip-us.apache.org/repos/asf/carbondata/blob/6f19fb1e/core/src/main/java/org/apache/carbondata/core/util/path/HDFSLeaseUtils.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/util/path/HDFSLeaseUtils.java b/core/src/main/java/org/apache/carbondata/core/util/path/HDFSLeaseUtils.java
index 833ed8b..1a10f46 100644
--- a/core/src/main/java/org/apache/carbondata/core/util/path/HDFSLeaseUtils.java
+++ b/core/src/main/java/org/apache/carbondata/core/util/path/HDFSLeaseUtils.java
@@ -37,13 +37,6 @@ import org.apache.log4j.Logger;
  */
 public class HDFSLeaseUtils {
 
-  private static final int CARBON_LEASE_RECOVERY_RETRY_COUNT_MIN = 1;
-  private static final int CARBON_LEASE_RECOVERY_RETRY_COUNT_MAX = 50;
-  private static final String CARBON_LEASE_RECOVERY_RETRY_COUNT_DEFAULT = "5";
-  private static final int CARBON_LEASE_RECOVERY_RETRY_INTERVAL_MIN = 1000;
-  private static final int CARBON_LEASE_RECOVERY_RETRY_INTERVAL_MAX = 10000;
-  private static final String CARBON_LEASE_RECOVERY_RETRY_INTERVAL_DEFAULT = "1000";
-
   /**
    * LOGGER
    */
@@ -164,22 +157,26 @@ public class HDFSLeaseUtils {
   private static int getLeaseRecoveryRetryCount() {
     String retryMaxAttempts = CarbonProperties.getInstance()
         .getProperty(CarbonCommonConstants.CARBON_LEASE_RECOVERY_RETRY_COUNT,
-            CARBON_LEASE_RECOVERY_RETRY_COUNT_DEFAULT);
+            CarbonCommonConstants.CARBON_LEASE_RECOVERY_RETRY_COUNT_DEFAULT);
     int retryCount = 0;
     try {
       retryCount = Integer.parseInt(retryMaxAttempts);
-      if (retryCount < CARBON_LEASE_RECOVERY_RETRY_COUNT_MIN
-          || retryCount > CARBON_LEASE_RECOVERY_RETRY_COUNT_MAX) {
-        retryCount = Integer.parseInt(CARBON_LEASE_RECOVERY_RETRY_COUNT_DEFAULT);
+      if (retryCount < CarbonCommonConstants.CARBON_LEASE_RECOVERY_RETRY_COUNT_MIN
+          || retryCount > CarbonCommonConstants.CARBON_LEASE_RECOVERY_RETRY_COUNT_MAX) {
+        retryCount = Integer.parseInt(
+            CarbonCommonConstants.CARBON_LEASE_RECOVERY_RETRY_COUNT_DEFAULT);
         LOGGER.warn(
-            "value configured for " + CarbonCommonConstants.CARBON_LEASE_RECOVERY_RETRY_COUNT
-                + " is not in allowed range. Allowed range is >="
-                + CARBON_LEASE_RECOVERY_RETRY_COUNT_MIN + " and <="
-                + CARBON_LEASE_RECOVERY_RETRY_COUNT_MAX + ". Therefore considering default value: "
-                + retryCount);
+            String.format("value configured for %s is not in allowed range. Allowed range " +
+                    "is >= %d and <= %d. Therefore considering default value: %d",
+                CarbonCommonConstants.CARBON_LEASE_RECOVERY_RETRY_COUNT,
+                CarbonCommonConstants.CARBON_LEASE_RECOVERY_RETRY_COUNT_MIN,
+                CarbonCommonConstants.CARBON_LEASE_RECOVERY_RETRY_COUNT_MAX,
+                retryCount
+            ));
       }
     } catch (NumberFormatException ne) {
-      retryCount = Integer.parseInt(CARBON_LEASE_RECOVERY_RETRY_COUNT_DEFAULT);
+      retryCount = Integer.parseInt(
+          CarbonCommonConstants.CARBON_LEASE_RECOVERY_RETRY_COUNT_DEFAULT);
       LOGGER.warn("value configured for " + CarbonCommonConstants.CARBON_LEASE_RECOVERY_RETRY_COUNT
           + " is incorrect. Therefore considering default value: " + retryCount);
     }
@@ -189,22 +186,24 @@ public class HDFSLeaseUtils {
   private static int getLeaseRecoveryRetryInterval() {
     String retryMaxAttempts = CarbonProperties.getInstance()
         .getProperty(CarbonCommonConstants.CARBON_LEASE_RECOVERY_RETRY_INTERVAL,
-            CARBON_LEASE_RECOVERY_RETRY_INTERVAL_DEFAULT);
+            CarbonCommonConstants.CARBON_LEASE_RECOVERY_RETRY_INTERVAL_DEFAULT);
     int retryCount = 0;
     try {
       retryCount = Integer.parseInt(retryMaxAttempts);
-      if (retryCount < CARBON_LEASE_RECOVERY_RETRY_INTERVAL_MIN
-          || retryCount > CARBON_LEASE_RECOVERY_RETRY_INTERVAL_MAX) {
-        retryCount = Integer.parseInt(CARBON_LEASE_RECOVERY_RETRY_INTERVAL_DEFAULT);
+      if (retryCount < CarbonCommonConstants.CARBON_LEASE_RECOVERY_RETRY_INTERVAL_MIN
+          || retryCount > CarbonCommonConstants.CARBON_LEASE_RECOVERY_RETRY_INTERVAL_MAX) {
+        retryCount = Integer.parseInt(
+            CarbonCommonConstants.CARBON_LEASE_RECOVERY_RETRY_INTERVAL_DEFAULT);
         LOGGER.warn(
             "value configured for " + CarbonCommonConstants.CARBON_LEASE_RECOVERY_RETRY_INTERVAL
                 + " is not in allowed range. Allowed range is >="
-                + CARBON_LEASE_RECOVERY_RETRY_INTERVAL_MIN + " and <="
-                + CARBON_LEASE_RECOVERY_RETRY_INTERVAL_MAX
+                + CarbonCommonConstants.CARBON_LEASE_RECOVERY_RETRY_INTERVAL_MIN + " and <="
+                + CarbonCommonConstants.CARBON_LEASE_RECOVERY_RETRY_INTERVAL_MAX
                 + ". Therefore considering default value (ms): " + retryCount);
       }
     } catch (NumberFormatException ne) {
-      retryCount = Integer.parseInt(CARBON_LEASE_RECOVERY_RETRY_INTERVAL_DEFAULT);
+      retryCount = Integer.parseInt(
+          CarbonCommonConstants.CARBON_LEASE_RECOVERY_RETRY_INTERVAL_DEFAULT);
       LOGGER.warn(
           "value configured for " + CarbonCommonConstants.CARBON_LEASE_RECOVERY_RETRY_INTERVAL
               + " is incorrect. Therefore considering default value (ms): " + retryCount);

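For reference, the parse-validate-fallback pattern that this hunk moves onto the CarbonCommonConstants bounds can be sketched in isolation as follows. The class and method names are illustrative only; the constant values mirror the ones removed from HDFSLeaseUtils.

public final class RetryConfigSketch {

  private static final int RETRY_COUNT_MIN = 1;
  private static final int RETRY_COUNT_MAX = 50;
  private static final String RETRY_COUNT_DEFAULT = "5";

  // Parses a configured value; falls back to the default when the value is
  // malformed or outside the allowed [MIN, MAX] range, as the code above does.
  static int resolveRetryCount(String configured) {
    try {
      int retryCount = Integer.parseInt(configured);
      if (retryCount < RETRY_COUNT_MIN || retryCount > RETRY_COUNT_MAX) {
        return Integer.parseInt(RETRY_COUNT_DEFAULT);
      }
      return retryCount;
    } catch (NumberFormatException ne) {
      return Integer.parseInt(RETRY_COUNT_DEFAULT);
    }
  }

  public static void main(String[] args) {
    System.out.println(resolveRetryCount("7"));    // 7: in range, kept as-is
    System.out.println(resolveRetryCount("500"));  // 5: out of range, default used
    System.out.println(resolveRetryCount("abc"));  // 5: malformed, default used
  }
}
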
http://git-wip-us.apache.org/repos/asf/carbondata/blob/6f19fb1e/core/src/test/java/org/apache/carbondata/core/CarbonPropertiesValidationTest.java
----------------------------------------------------------------------
diff --git a/core/src/test/java/org/apache/carbondata/core/CarbonPropertiesValidationTest.java b/core/src/test/java/org/apache/carbondata/core/CarbonPropertiesValidationTest.java
index 7cc665e..b2b03cd 100644
--- a/core/src/test/java/org/apache/carbondata/core/CarbonPropertiesValidationTest.java
+++ b/core/src/test/java/org/apache/carbondata/core/CarbonPropertiesValidationTest.java
@@ -24,7 +24,6 @@ import org.apache.carbondata.core.constants.CarbonCommonConstants;
 import org.apache.carbondata.core.util.CarbonProperties;
 
 import junit.framework.TestCase;
-import org.junit.Assert;
 import org.junit.Test;
 
 /**

http://git-wip-us.apache.org/repos/asf/carbondata/blob/6f19fb1e/core/src/test/java/org/apache/carbondata/core/keygenerator/directdictionary/DateDirectDictionaryGeneratorTest.java
----------------------------------------------------------------------
diff --git a/core/src/test/java/org/apache/carbondata/core/keygenerator/directdictionary/DateDirectDictionaryGeneratorTest.java b/core/src/test/java/org/apache/carbondata/core/keygenerator/directdictionary/DateDirectDictionaryGeneratorTest.java
index 7390556..78ee6d9 100644
--- a/core/src/test/java/org/apache/carbondata/core/keygenerator/directdictionary/DateDirectDictionaryGeneratorTest.java
+++ b/core/src/test/java/org/apache/carbondata/core/keygenerator/directdictionary/DateDirectDictionaryGeneratorTest.java
@@ -18,7 +18,6 @@ package org.apache.carbondata.core.keygenerator.directdictionary;
 
 import org.apache.carbondata.core.constants.CarbonCommonConstants;
 import org.apache.carbondata.core.keygenerator.directdictionary.timestamp.TimeStampDirectDictionaryGenerator;
-import org.apache.carbondata.core.metadata.datatype.DataType;
 import org.apache.carbondata.core.metadata.datatype.DataTypes;
 
 import org.junit.After;

http://git-wip-us.apache.org/repos/asf/carbondata/blob/6f19fb1e/docs/configuration-parameters.md
----------------------------------------------------------------------
diff --git a/docs/configuration-parameters.md b/docs/configuration-parameters.md
index 5a4dea6..c210cae 100644
--- a/docs/configuration-parameters.md
+++ b/docs/configuration-parameters.md
@@ -121,8 +121,8 @@ This section provides the details of all the configurations required for the Car
 | carbon.max.executor.lru.cache.size | -1 | Maximum memory **(in MB)** up to which the executor process can cache the data (BTree and reverse dictionary values). The default value of -1 means there is no memory limit for caching. Only integer values greater than 0 are accepted. **NOTE:** If this parameter is not configured, then the value of ***carbon.max.driver.lru.cache.size*** will be used. |
 | max.query.execution.time | 60 | Maximum time allowed for one query to be executed. The value is in minutes. |
 | carbon.enableMinMax | true | CarbonData maintains metadata which enables pruning of unnecessary files from being scanned as per the query conditions. To achieve pruning, the Min and Max of each column are maintained. Based on the filter condition in the query, certain data can be skipped from scanning by matching the filter value against the min and max values of the column(s) present in that carbondata file. This pruning enhances query performance significantly. |
-| carbon.dynamicallocation.schedulertimeout | 5 | CarbonData has its own scheduling algorithm to suggest to Spark how many tasks need to be launched and how much work each task needs to do in a Spark cluster for any query on CarbonData. To determine the number of tasks that can be scheduled, knowing the count of active executors is necessary. When dynamic allocation is enabled on a YARN based Spark cluster, executor processes are shut down if no request is received for a particular amount of time. The executors are brought up when the request is received again. This configuration specifies the maximum time (unit in seconds) the carbon scheduler can wait for executors to be active. Minimum value is 5 sec and maximum value is 15 sec. **NOTE:** Waiting for a longer time leads to slow query response time. Moreover, it might be possible that YARN is not able to start the executors, in which case waiting is not beneficial. |
-| carbon.scheduler.minregisteredresourcesratio | 0.8 | Specifies the minimum resource (executor) ratio needed for starting the block distribution. The default value is 0.8, which indicates 80% of the requested resource is allocated for starting block distribution. The minimum value is 0.1 and the maximum value is 1.0. |
+| carbon.dynamical.location.scheduler.timeout | 5 | CarbonData has its own scheduling algorithm to suggest to Spark how many tasks need to be launched and how much work each task needs to do in a Spark cluster for any query on CarbonData. To determine the number of tasks that can be scheduled, knowing the count of active executors is necessary. When dynamic allocation is enabled on a YARN based Spark cluster, executor processes are shut down if no request is received for a particular amount of time. The executors are brought up when the request is received again. This configuration specifies the maximum time (unit in seconds) the carbon scheduler can wait for executors to be active. Minimum value is 5 sec and maximum value is 15 sec. **NOTE:** Waiting for a longer time leads to slow query response time. Moreover, it might be possible that YARN is not able to start the executors, in which case waiting is not beneficial. |
+| carbon.scheduler.min.registered.resources.ratio | 0.8 | Specifies the minimum resource (executor) ratio needed for starting the block distribution. The default value is 0.8, which indicates 80% of the requested resource is allocated for starting block distribution. The minimum value is 0.1 and the maximum value is 1.0. |
 | carbon.search.enabled (Alpha Feature) | false | If set to true, it will use CarbonReader to do a distributed scan directly instead of using a compute framework like Spark, thus avoiding limitations of the compute framework such as the SQL optimizer and task scheduling overhead. |
 | carbon.search.query.timeout | 10s | Time within which the result is expected from the workers, beyond which the query is terminated. |
 | carbon.search.scan.thread | num of cores available in worker node | Number of cores to be used in each worker for performing scan. |
@@ -135,7 +135,7 @@ This section provides the details of all the configurations required for the Car
 | carbon.custom.block.distribution | false | CarbonData has its own scheduling algorithm to suggest to Spark how many tasks need to be launched and how much work each task needs to do in a Spark cluster for any query on CarbonData. When this configuration is true, CarbonData distributes the available blocks to be scanned among the available number of cores. For example: if there are 10 blocks to be scanned and only 3 tasks can run (only 3 executor cores available in the cluster), CarbonData would combine the blocks as 4,3,3 and give them to 3 tasks to run. **NOTE:** When this configuration is false, as per the ***carbon.task.distribution*** configuration, each block/blocklet would be given to each task. |
 | enable.query.statistics | false | CarbonData has extensive logging which would be useful for debugging performance problems or hard-to-locate issues. This configuration, when set to ***true***, would log additional query statistics information to more accurately locate the issues being debugged. **NOTE:** Enabling this would log more debug information to log files, thereby increasing the log file size significantly in a short span of time. It is advised to configure the log file size and log file retention parameters in the log4j properties appropriately. Also, extensive logging is an increased IO operation and hence overall query performance might get reduced. Therefore it is recommended to enable this configuration only for the duration of debugging. |
 | enable.unsafe.in.query.processing | false | CarbonData supports unsafe operations of Java to avoid GC overhead for certain operations. This configuration enables the use of unsafe functions in CarbonData while scanning the data during query. |
-| carbon.query.validate.directqueryondatamap | true | CarbonData supports creating pre-aggregate table datamaps as independent tables. For some debugging purposes, it might be required to query directly from such datamap tables. This configuration allows querying on such datamaps. |
+| carbon.query.validate.direct.query.on.datamap | true | CarbonData supports creating pre-aggregate table datamaps as independent tables. For some debugging purposes, it might be required to query directly from such datamap tables. This configuration allows querying on such datamaps. |
 | carbon.heap.memory.pooling.threshold.bytes | 1048576 | CarbonData supports unsafe operations of Java to avoid GC overhead for certain operations. Using unsafe, memory can be allocated on the Java heap or off heap. This configuration controls the allocation mechanism on the Java heap. If a heap memory allocation of the given size is greater than or equal to this value, it should go through the pooling mechanism; if this size is set to -1, pooling is not used. Default value is 1048576 (1 MB, the same as Spark). Value to be specified in bytes. |
 
 ## Data Mutation Configuration
@@ -204,12 +204,12 @@ RESET
 | carbon.options.bad.record.path            | Specifies the HDFS path where bad records need to be stored. |
 | carbon.custom.block.distribution          | Specifies whether to use the Spark or Carbon block distribution feature. **NOTE:** Refer to [Query Configuration](#query-configuration)#carbon.custom.block.distribution for more details on the CarbonData scheduler. |
 | enable.unsafe.sort                        | Specifies whether to use unsafe sort during data loading. Unsafe sort reduces the garbage collection during data load operation, resulting in better performance. |
-| carbon.options.dateformat                 | Specifies the date format of the date columns in the data being loaded |
-| carbon.options.timestampformat            | Specifies the timestamp format of the time stamp columns in the data being loaded |
+| carbon.options.date.format                 | Specifies the date format of the date columns in the data being loaded |
+| carbon.options.timestamp.format            | Specifies the timestamp format of the time stamp columns in the data being loaded |
 | carbon.options.sort.scope                 | Specifies how the current data load should be sorted. **NOTE:** Refer to [Data Loading Configuration](#data-loading-configuration)#carbon.sort.scope for detailed information. |
 | carbon.options.global.sort.partitions     |                                                              |
 | carbon.options.serialization.null.format  | Default Null value representation in the data being loaded. **NOTE:** Refer to [Data Loading Configuration](#data-loading-configuration)#carbon.options.serialization.null.format for detailed information. |
-| carbon.query.directQueryOnDataMap.enabled | Specifies whether a datamap can be queried directly. This is useful for debugging purposes. **NOTE:** Refer to [Query Configuration](#query-configuration)#carbon.query.validate.directqueryondatamap for detailed information. |
+| carbon.query.directQueryOnDataMap.enabled | Specifies whether a datamap can be queried directly. This is useful for debugging purposes. **NOTE:** Refer to [Query Configuration](#query-configuration)#carbon.query.validate.direct.query.on.datamap for detailed information. |
 
 **Examples:**
 

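The renamed keys in the table above are ordinary CarbonProperties entries, so they can be set and read programmatically. A minimal usage sketch, assuming only the CarbonProperties API already used elsewhere in this commit (the key string and the default "5" come from the table; the class name is illustrative):

import org.apache.carbondata.core.util.CarbonProperties;

public final class SchedulerTimeoutExample {
  public static void main(String[] args) {
    CarbonProperties props = CarbonProperties.getInstance();
    // Renamed dotted key from this commit; the documented default is 5 (seconds).
    props.addProperty("carbon.dynamical.location.scheduler.timeout", "10");
    String timeout = props.getProperty("carbon.dynamical.location.scheduler.timeout", "5");
    System.out.println("carbon scheduler timeout (s): " + timeout);
  }
}
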
http://git-wip-us.apache.org/repos/asf/carbondata/blob/6f19fb1e/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/iud/HorizontalCompactionTestCase.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/iud/HorizontalCompactionTestCase.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/iud/HorizontalCompactionTestCase.scala
index af1846b..24c8f17 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/iud/HorizontalCompactionTestCase.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/iud/HorizontalCompactionTestCase.scala
@@ -47,7 +47,7 @@ class HorizontalCompactionTestCase extends QueryTest with BeforeAndAfterAll {
     sql(
       s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/update01.csv' INTO TABLE iud4.update_01 OPTIONS('BAD_RECORDS_LOGGER_ENABLE' = 'FALSE', 'BAD_RECORDS_ACTION' = 'FORCE') """)
     CarbonProperties.getInstance()
-      .addProperty(CarbonCommonConstants.isHorizontalCompactionEnabled, "true")
+      .addProperty(CarbonCommonConstants.CARBON_HORIZONTAL_COMPACTION_ENABLE, "true")
   }
 
 
@@ -389,7 +389,7 @@ class HorizontalCompactionTestCase extends QueryTest with BeforeAndAfterAll {
     sql("use default")
     sql("drop database if exists iud4 cascade")
     CarbonProperties.getInstance()
-      .addProperty(CarbonCommonConstants.isHorizontalCompactionEnabled , "true")
+      .addProperty(CarbonCommonConstants.CARBON_HORIZONTAL_COMPACTION_ENABLE , "true")
     sql("""drop table if exists t_carbn01""")
     sql("""drop table if exists customer1""")
   }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/6f19fb1e/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/iud/UpdateCarbonTableTestCase.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/iud/UpdateCarbonTableTestCase.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/iud/UpdateCarbonTableTestCase.scala
index cc8abe4..50fdd0c 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/iud/UpdateCarbonTableTestCase.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/iud/UpdateCarbonTableTestCase.scala
@@ -46,7 +46,7 @@ class UpdateCarbonTableTestCase extends QueryTest with BeforeAndAfterAll {
     sql("""CREATE TABLE iud.update_01(imei string,age int,task bigint,num double,level decimal(10,3),name string)STORED BY 'org.apache.carbondata.format' """)
     sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/update01.csv' INTO TABLE iud.update_01 OPTIONS('BAD_RECORDS_LOGGER_ENABLE' = 'FALSE', 'BAD_RECORDS_ACTION' = 'FORCE') """)
     CarbonProperties.getInstance()
-      .addProperty(CarbonCommonConstants.isHorizontalCompactionEnabled , "true")
+      .addProperty(CarbonCommonConstants.CARBON_HORIZONTAL_COMPACTION_ENABLE , "true")
     CarbonProperties.getInstance()
       .addProperty(CarbonCommonConstants.ENABLE_VECTOR_READER , "true")
   }
@@ -457,7 +457,7 @@ class UpdateCarbonTableTestCase extends QueryTest with BeforeAndAfterAll {
 
   test("""CARBONDATA-1445 carbon.update.persist.enable=false it will fail to update data""") {
     CarbonProperties.getInstance()
-      .addProperty(CarbonCommonConstants.isPersistEnabled, "false")
+      .addProperty(CarbonCommonConstants.CARBON_UPDATE_PERSIST_ENABLE, "false")
     import sqlContext.implicits._
     val df = sqlContext.sparkContext.parallelize(0 to 50)
       .map(x => ("a", x.toString, (x % 2).toString, x, x.toLong, x * 2))
@@ -486,7 +486,7 @@ class UpdateCarbonTableTestCase extends QueryTest with BeforeAndAfterAll {
       """).show()
     assert(sql("select stringField1 from study_carbondata where stringField2 = '1_test'").collect().length == 1)
     CarbonProperties.getInstance()
-      .addProperty(CarbonCommonConstants.isPersistEnabled, "true")
+      .addProperty(CarbonCommonConstants.CARBON_UPDATE_PERSIST_ENABLE, "true")
     sql("DROP TABLE IF EXISTS study_carbondata ")
   }
 
@@ -555,7 +555,7 @@ class UpdateCarbonTableTestCase extends QueryTest with BeforeAndAfterAll {
   }
   test("Update operation on carbon table with persist false") {
     CarbonProperties.getInstance()
-      .addProperty(CarbonCommonConstants.isPersistEnabled, "false")
+      .addProperty(CarbonCommonConstants.CARBON_UPDATE_PERSIST_ENABLE, "false")
     sql("drop database if exists carbon1 cascade")
     sql(s"create database carbon1 location '$dblocation'")
     sql("use carbon1")
@@ -571,8 +571,8 @@ class UpdateCarbonTableTestCase extends QueryTest with BeforeAndAfterAll {
     )
     sql("drop table carbontable")
     CarbonProperties.getInstance()
-      .addProperty(CarbonCommonConstants.isPersistEnabled,
-        CarbonCommonConstants.defaultValueIsPersistEnabled)
+      .addProperty(CarbonCommonConstants.CARBON_UPDATE_PERSIST_ENABLE,
+        CarbonCommonConstants.CARBON_UPDATE_PERSIST_ENABLE_DEFAULT)
   }
 
   test("partition test update operation with 0 rows updation.") {
@@ -776,7 +776,7 @@ class UpdateCarbonTableTestCase extends QueryTest with BeforeAndAfterAll {
     sql("use default")
     sql("drop database  if exists iud cascade")
     CarbonProperties.getInstance()
-      .addProperty(CarbonCommonConstants.isHorizontalCompactionEnabled , "true")
+      .addProperty(CarbonCommonConstants.CARBON_HORIZONTAL_COMPACTION_ENABLE , "true")
     CarbonProperties.getInstance()
       .addProperty(CarbonCommonConstants.ENABLE_VECTOR_READER , "true")
   }

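The persist-related edits above follow a set-then-restore discipline: a test flips carbon.update.persist.enable off and puts the default back afterwards. A hedged Java sketch of that pattern follows; the key string matches the CARBONDATA-1445 test name above, the "true" default is assumed to mirror CARBON_UPDATE_PERSIST_ENABLE_DEFAULT, and the try/finally wrapper is illustrative, not part of the commit.

import org.apache.carbondata.core.util.CarbonProperties;

public final class PersistToggleSketch {
  // Key spelled out in the CARBONDATA-1445 test name; default value assumed.
  private static final String PERSIST_KEY = "carbon.update.persist.enable";
  private static final String PERSIST_DEFAULT = "true";

  static void runWithPersistDisabled(Runnable body) {
    CarbonProperties props = CarbonProperties.getInstance();
    try {
      props.addProperty(PERSIST_KEY, "false");
      body.run();
    } finally {
      // Restore the default so later tests observe a clean configuration.
      props.addProperty(PERSIST_KEY, PERSIST_DEFAULT);
    }
  }
}
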
http://git-wip-us.apache.org/repos/asf/carbondata/blob/6f19fb1e/integration/spark-common/src/main/scala/org/apache/spark/util/CarbonReflectionUtils.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common/src/main/scala/org/apache/spark/util/CarbonReflectionUtils.scala b/integration/spark-common/src/main/scala/org/apache/spark/util/CarbonReflectionUtils.scala
index 0055e87..af9cc2d 100644
--- a/integration/spark-common/src/main/scala/org/apache/spark/util/CarbonReflectionUtils.scala
+++ b/integration/spark-common/src/main/scala/org/apache/spark/util/CarbonReflectionUtils.scala
@@ -196,7 +196,7 @@ object CarbonReflectionUtils {
       sparkSession: SparkSession): AstBuilder = {
     val className = sparkSession.sparkContext.conf.get(
       CarbonCommonConstants.CARBON_SQLASTBUILDER_CLASSNAME,
-      "org.apache.spark.sql.hive.CarbonSqlAstBuilder")
+      CarbonCommonConstants.CARBON_SQLASTBUILDER_CLASSNAME_DEFAULT)
     createObject(className,
       conf,
       sqlParser, sparkSession)._1.asInstanceOf[AstBuilder]

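getAstBuilder resolves the builder class name from configuration and instantiates it reflectively via createObject. A stripped-down Java sketch of that resolve-and-construct step (the class name here is a stand-in; CarbonReflectionUtils matches constructors against the actual argument types rather than hard-coding one):

public final class ReflectiveFactorySketch {
  public static void main(String[] args) throws Exception {
    // Resolve the class name (hard-coded here; getAstBuilder reads it from SparkConf).
    String className = "java.lang.StringBuilder";
    Class<?> clazz = Class.forName(className);
    // Pick the constructor matching the intended argument types, then instantiate.
    Object instance = clazz.getConstructor(String.class).newInstance("seed");
    System.out.println(instance);  // prints: seed
  }
}
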
http://git-wip-us.apache.org/repos/asf/carbondata/blob/6f19fb1e/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonLoadDataCommand.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonLoadDataCommand.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonLoadDataCommand.scala
index 6905aa2..5fbe82e 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonLoadDataCommand.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonLoadDataCommand.scala
@@ -208,7 +208,8 @@ case class CarbonLoadDataCommand(
       carbonLoadModel.setFactFilePath(factPath)
       carbonLoadModel.setCarbonTransactionalTable(table.getTableInfo.isTransactionalTable)
       carbonLoadModel.setAggLoadRequest(
-        internalOptions.getOrElse(CarbonCommonConstants.IS_INTERNAL_LOAD_CALL, "false").toBoolean)
+        internalOptions.getOrElse(CarbonCommonConstants.IS_INTERNAL_LOAD_CALL,
+          CarbonCommonConstants.IS_INTERNAL_LOAD_CALL_DEFAULT).toBoolean)
       carbonLoadModel.setSegmentId(internalOptions.getOrElse("mergedSegmentName", ""))
       val columnCompressor = table.getTableInfo.getFactTable.getTableProperties.asScala
         .getOrElse(CarbonCommonConstants.COMPRESSOR,

http://git-wip-us.apache.org/repos/asf/carbondata/blob/6f19fb1e/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/table/CarbonDropTableCommand.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/table/CarbonDropTableCommand.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/table/CarbonDropTableCommand.scala
index 21070be..37ab04f 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/table/CarbonDropTableCommand.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/table/CarbonDropTableCommand.scala
@@ -183,7 +183,8 @@ case class CarbonDropTableCommand(
         CarbonUtil.deleteFoldersAndFilesSilent(file)
       }
       // Delete lock directory if external lock path is specified.
-      if (CarbonProperties.getInstance.getProperty(CarbonCommonConstants.LOCK_PATH, "").toLowerCase
+      if (CarbonProperties.getInstance.getProperty(CarbonCommonConstants.LOCK_PATH,
+        CarbonCommonConstants.LOCK_PATH_DEFAULT).toLowerCase
         .nonEmpty) {
         val tableLockPath = CarbonLockFactory
           .getLockpath(carbonTable.getCarbonTableIdentifier.getTableId)

http://git-wip-us.apache.org/repos/asf/carbondata/blob/6f19fb1e/integration/spark2/src/main/scala/org/apache/spark/sql/execution/strategy/CarbonLateDecodeStrategy.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/strategy/CarbonLateDecodeStrategy.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/strategy/CarbonLateDecodeStrategy.scala
index 04b65e8..16d974e 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/strategy/CarbonLateDecodeStrategy.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/strategy/CarbonLateDecodeStrategy.scala
@@ -47,8 +47,6 @@ import org.apache.carbondata.core.util.CarbonProperties
 import org.apache.carbondata.datamap.{TextMatch, TextMatchLimit, TextMatchMaxDocUDF, TextMatchUDF}
 import org.apache.carbondata.spark.CarbonAliasDecoderRelation
 import org.apache.carbondata.spark.rdd.CarbonScanRDD
-import org.apache.carbondata.spark.util.CarbonScalaUtil
-
 
 /**
  * Carbon specific optimization for late decode (convert dictionary key to value as late as
@@ -57,8 +55,6 @@ import org.apache.carbondata.spark.util.CarbonScalaUtil
 private[sql] class CarbonLateDecodeStrategy extends SparkStrategy {
   val PUSHED_FILTERS = "PushedFilters"
 
-  val vectorPushRowFilters = CarbonProperties.getInstance().isPushRowFiltersForVector
-
   /*
   Spark 2.3.1 plan there can be case of multiple projections like below
   Project [substring(name, 1, 2)#124, name#123, tupleId#117, cast(rand(-6778822102499951904)#125
@@ -307,6 +303,7 @@ private[sql] class CarbonLateDecodeStrategy extends SparkStrategy {
     // applying the filter in spark's side. So we should disable vectorPushRowFilters option
     // in case of filters on global dictionary.
     val hasDictionaryFilterCols = hasFilterOnDictionaryColumn(filterSet, table)
+    val vectorPushRowFilters = CarbonProperties.getInstance().isPushRowFiltersForVector
     if (projects.map(_.toAttribute) == projects &&
         projectSet.size == projects.size &&
         filterSet.subsetOf(projectSet)) {

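Moving vectorPushRowFilters from a class-level val into the method body changes when the property is read: a field captures it once per strategy instance, while the method-local val re-reads it on every invocation, so later changes to the property take effect. An illustrative Java sketch of that difference (the AtomicBoolean is a stand-in for the mutable CarbonProperties setting; names are hypothetical):

import java.util.concurrent.atomic.AtomicBoolean;

public final class EagerVsLazyReadSketch {
  // Stand-in for the mutable CarbonProperties setting.
  private static final AtomicBoolean PUSH_ROW_FILTERS = new AtomicBoolean(false);

  // Captured once at construction; goes stale if the setting changes later.
  private final boolean cachedAtConstruction = PUSH_ROW_FILTERS.get();

  boolean readPerCall() {
    return PUSH_ROW_FILTERS.get();  // always reflects the current setting
  }

  public static void main(String[] args) {
    EagerVsLazyReadSketch s = new EagerVsLazyReadSketch();
    PUSH_ROW_FILTERS.set(true);
    System.out.println(s.cachedAtConstruction);  // false (stale snapshot)
    System.out.println(s.readPerCall());         // true (fresh read)
  }
}
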
http://git-wip-us.apache.org/repos/asf/carbondata/blob/6f19fb1e/integration/spark2/src/main/scala/org/apache/spark/sql/hive/execution/command/CarbonHiveCommands.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/hive/execution/command/CarbonHiveCommands.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/hive/execution/command/CarbonHiveCommands.scala
index 186e39e..c681b62 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/hive/execution/command/CarbonHiveCommands.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/hive/execution/command/CarbonHiveCommands.scala
@@ -112,7 +112,7 @@ object CarbonSetCommand {
         sessionParams.addProperty(key.toLowerCase, value)
       } else {
         throw new MalformedCarbonCommandException("property should be in " +
-          "\" carbon.datamap.visible.<database_name>.<table_name>.<database_name>" +
+          "\" carbon.datamap.visible.<database_name>.<table_name>.<datamap_name>" +
           " = <true/false> \" format")
       }
     } else if (key.startsWith(CarbonCommonConstants.CARBON_LOAD_DATAMAPS_PARALLEL)) {

http://git-wip-us.apache.org/repos/asf/carbondata/blob/6f19fb1e/processing/src/main/java/org/apache/carbondata/processing/merger/CarbonDataMergerUtil.java
----------------------------------------------------------------------
diff --git a/processing/src/main/java/org/apache/carbondata/processing/merger/CarbonDataMergerUtil.java b/processing/src/main/java/org/apache/carbondata/processing/merger/CarbonDataMergerUtil.java
index a7fc3f8..fc139a6 100644
--- a/processing/src/main/java/org/apache/carbondata/processing/merger/CarbonDataMergerUtil.java
+++ b/processing/src/main/java/org/apache/carbondata/processing/merger/CarbonDataMergerUtil.java
@@ -1190,8 +1190,9 @@ public final class CarbonDataMergerUtil {
    */
   public static boolean isHorizontalCompactionEnabled() {
     if ((CarbonProperties.getInstance()
-        .getProperty(CarbonCommonConstants.isHorizontalCompactionEnabled,
-            CarbonCommonConstants.defaultIsHorizontalCompactionEnabled)).equalsIgnoreCase("true")) {
+        .getProperty(CarbonCommonConstants.CARBON_HORIZONTAL_COMPACTION_ENABLE,
+            CarbonCommonConstants.CARBON_HORIZONTAL_COMPACTION_ENABLE_DEFAULT))
+        .equalsIgnoreCase("true")) {
       return true;
     } else {
       return false;

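Since equalsIgnoreCase already yields the needed boolean, the if/else in isHorizontalCompactionEnabled is equivalent to a direct return. A compact sketch of the same check, shown standalone for illustration rather than as part of the commit:

public final class HorizontalCompactionFlagSketch {
  // Equivalent to the if/else above: true only for a case-insensitive "true".
  static boolean isEnabled(String configuredValue) {
    return "true".equalsIgnoreCase(configuredValue);
  }

  public static void main(String[] args) {
    System.out.println(isEnabled("TRUE"));   // true
    System.out.println(isEnabled("false"));  // false
  }
}
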
http://git-wip-us.apache.org/repos/asf/carbondata/blob/6f19fb1e/processing/src/main/java/org/apache/carbondata/processing/util/CarbonLoaderUtil.java
----------------------------------------------------------------------
diff --git a/processing/src/main/java/org/apache/carbondata/processing/util/CarbonLoaderUtil.java b/processing/src/main/java/org/apache/carbondata/processing/util/CarbonLoaderUtil.java
index f1e1d9e..3e4e31f 100644
--- a/processing/src/main/java/org/apache/carbondata/processing/util/CarbonLoaderUtil.java
+++ b/processing/src/main/java/org/apache/carbondata/processing/util/CarbonLoaderUtil.java
@@ -591,7 +591,8 @@ public final class CarbonLoaderUtil {
       } else {
         LOGGER.warn("Invalid load_min_size_inmb value found: " + expectedMinSizePerNode
             + ", only int value greater than 0 is supported.");
-        iexpectedMinSizePerNode = CarbonCommonConstants.CARBON_LOAD_MIN_SIZE_DEFAULT;
+        iexpectedMinSizePerNode = Integer.parseInt(
+            CarbonCommonConstants.CARBON_LOAD_MIN_SIZE_INMB_DEFAULT);
       }
       // If the average expected size for each node greater than load min size,
       // then fall back to default strategy
