Repository: carbondata
Updated Branches:
  refs/heads/master de52da40f -> 873c3ded0


[CARBONDATA-2220] Reduce unnecessary audit log

This closes #2020


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/873c3ded
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/873c3ded
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/873c3ded

Branch: refs/heads/master
Commit: 873c3ded04738fa9cbef9a5f0d37902fc758a88b
Parents: de52da4
Author: Jacky Li <jacky.li...@qq.com>
Authored: Fri Mar 2 15:40:27 2018 +0800
Committer: QiangCai <qiang...@qq.com>
Committed: Fri Mar 23 22:02:42 2018 +0800

----------------------------------------------------------------------
 .../generator/TableDictionaryGenerator.java     |  2 +-
 .../server/NonSecureDictionaryServer.java       |  2 +-
 .../NonSecureDictionaryServerHandler.java       |  2 +-
 .../carbondata/core/scan/filter/FilterUtil.java |  4 +-
 .../core/util/CarbonLoadStatisticsImpl.java     | 50 ++++++++++----------
 .../carbondata/core/util/CarbonProperties.java  |  6 ++-
 .../carbondata/examples/CompareTest.scala       |  4 +-
 .../management/CarbonInsertIntoCommand.scala    |  2 +-
 .../CarbonAlterTableDropPartitionCommand.scala  |  1 -
 .../CarbonAlterTableSplitPartitionCommand.scala |  1 -
 .../spark/sql/hive/CarbonFileMetastore.scala    |  2 +-
 .../spark/sql/hive/CarbonHiveMetadataUtil.scala |  2 +-
 .../apache/spark/sql/hive/CarbonMetaStore.scala |  4 +-
 .../holder/UnsafeFinalMergePageHolder.java      |  2 +-
 .../unsafe/holder/UnsafeInmemoryHolder.java     |  2 +-
 .../holder/UnsafeInmemoryMergeHolder.java       |  2 +-
 .../holder/UnsafeSortTempFileChunkHolder.java   |  2 +-
 .../util/CarbonDataProcessorUtil.java           | 15 ++++--
 .../carbondata/streaming/StreamHandoffRDD.scala |  2 +-
 19 files changed, 55 insertions(+), 52 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/873c3ded/core/src/main/java/org/apache/carbondata/core/dictionary/generator/TableDictionaryGenerator.java
----------------------------------------------------------------------
diff --git 
a/core/src/main/java/org/apache/carbondata/core/dictionary/generator/TableDictionaryGenerator.java
 
b/core/src/main/java/org/apache/carbondata/core/dictionary/generator/TableDictionaryGenerator.java
index 905d2fa..5db13b6 100644
--- 
a/core/src/main/java/org/apache/carbondata/core/dictionary/generator/TableDictionaryGenerator.java
+++ 
b/core/src/main/java/org/apache/carbondata/core/dictionary/generator/TableDictionaryGenerator.java
@@ -84,7 +84,7 @@ public class TableDictionaryGenerator
     } catch (InterruptedException e) {
       LOGGER.error("Error loading the dictionary: " + e.getMessage());
     }
-    LOGGER.audit("Total time taken to write dictionary file is: " +
+    LOGGER.info("Total time taken to write dictionary file is: " +
             (System.currentTimeMillis() - start));
   }
 

http://git-wip-us.apache.org/repos/asf/carbondata/blob/873c3ded/core/src/main/java/org/apache/carbondata/core/dictionary/server/NonSecureDictionaryServer.java
----------------------------------------------------------------------
diff --git 
a/core/src/main/java/org/apache/carbondata/core/dictionary/server/NonSecureDictionaryServer.java
 
b/core/src/main/java/org/apache/carbondata/core/dictionary/server/NonSecureDictionaryServer.java
index daba470..95f3d69 100644
--- 
a/core/src/main/java/org/apache/carbondata/core/dictionary/server/NonSecureDictionaryServer.java
+++ 
b/core/src/main/java/org/apache/carbondata/core/dictionary/server/NonSecureDictionaryServer.java
@@ -113,7 +113,7 @@ public class NonSecureDictionaryServer extends 
AbstractDictionaryServer
             new InetSocketAddress(newPort) :
             new InetSocketAddress(hostToBind, newPort);
         bootstrap.bind(address).sync();
-        LOGGER.audit("Dictionary Server started, Time spent " + 
(System.currentTimeMillis() - start)
+        LOGGER.info("Dictionary Server started, Time spent " + 
(System.currentTimeMillis() - start)
             + " Listening on port " + newPort);
         this.port = newPort;
         this.host = hostToBind;

http://git-wip-us.apache.org/repos/asf/carbondata/blob/873c3ded/core/src/main/java/org/apache/carbondata/core/dictionary/server/NonSecureDictionaryServerHandler.java
----------------------------------------------------------------------
diff --git 
a/core/src/main/java/org/apache/carbondata/core/dictionary/server/NonSecureDictionaryServerHandler.java
 
b/core/src/main/java/org/apache/carbondata/core/dictionary/server/NonSecureDictionaryServerHandler.java
index dc3d078..82efe80 100644
--- 
a/core/src/main/java/org/apache/carbondata/core/dictionary/server/NonSecureDictionaryServerHandler.java
+++ 
b/core/src/main/java/org/apache/carbondata/core/dictionary/server/NonSecureDictionaryServerHandler.java
@@ -48,7 +48,7 @@ import io.netty.channel.ChannelInboundHandlerAdapter;
    * @throws Exception
    */
   public void channelActive(ChannelHandlerContext ctx) throws Exception {
-    LOGGER.audit("Connected " + ctx);
+    LOGGER.info("Connected " + ctx);
     super.channelActive(ctx);
   }
 

http://git-wip-us.apache.org/repos/asf/carbondata/blob/873c3ded/core/src/main/java/org/apache/carbondata/core/scan/filter/FilterUtil.java
----------------------------------------------------------------------
diff --git 
a/core/src/main/java/org/apache/carbondata/core/scan/filter/FilterUtil.java 
b/core/src/main/java/org/apache/carbondata/core/scan/filter/FilterUtil.java
index 0b72e77..a397355 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/filter/FilterUtil.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/filter/FilterUtil.java
@@ -777,7 +777,7 @@ public final class FilterUtil {
         columnFilterInfo.setFilterList(filterValuesList);
       }
     } catch (FilterIllegalMemberException e) {
-      LOGGER.audit(e.getMessage());
+      LOGGER.error(e.getMessage());
     }
     return columnFilterInfo;
   }
@@ -817,7 +817,7 @@ public final class FilterUtil {
         }
       }
     } catch (FilterIllegalMemberException e) {
-      LOGGER.audit(e.getMessage());
+      LOGGER.error(e.getMessage());
     }
 
     if (null == defaultValues) {

http://git-wip-us.apache.org/repos/asf/carbondata/blob/873c3ded/core/src/main/java/org/apache/carbondata/core/util/CarbonLoadStatisticsImpl.java
----------------------------------------------------------------------
diff --git 
a/core/src/main/java/org/apache/carbondata/core/util/CarbonLoadStatisticsImpl.java
 
b/core/src/main/java/org/apache/carbondata/core/util/CarbonLoadStatisticsImpl.java
index c9fc8ba..9c6ab95 100644
--- 
a/core/src/main/java/org/apache/carbondata/core/util/CarbonLoadStatisticsImpl.java
+++ 
b/core/src/main/java/org/apache/carbondata/core/util/CarbonLoadStatisticsImpl.java
@@ -311,52 +311,52 @@ public class CarbonLoadStatisticsImpl implements 
LoadStatistics {
   //Print the statistics information
   private void printDicGenStatisticsInfo() {
     double loadCsvfilesToDfTime = getLoadCsvfilesToDfTime();
-    LOGGER.audit("STAGE 1 ->Load csv to DataFrame and generate" +
+    LOGGER.info("STAGE 1 ->Load csv to DataFrame and generate" +
             " block distinct values: " + loadCsvfilesToDfTime + "(s)");
     double dicShuffleAndWriteFileTotalTime = 
getDicShuffleAndWriteFileTotalTime();
-    LOGGER.audit("STAGE 2 ->Global dict shuffle and write dict file: " +
+    LOGGER.info("STAGE 2 ->Global dict shuffle and write dict file: " +
             + dicShuffleAndWriteFileTotalTime + "(s)");
   }
 
   private void printLruCacheLoadTimeInfo() {
-    LOGGER.audit("STAGE 3 ->LRU cache load: " + getLruCacheLoadTime() + "(s)");
+    LOGGER.info("STAGE 3 ->LRU cache load: " + getLruCacheLoadTime() + "(s)");
   }
 
   private void printDictionaryValuesGenStatisticsInfo(String partitionID) {
     double dictionaryValuesTotalTime = 
getDictionaryValuesTotalTime(partitionID);
-    LOGGER.audit("STAGE 4 ->Total cost of gen dictionary values, sort and 
write to temp files: "
+    LOGGER.info("STAGE 4 ->Total cost of gen dictionary values, sort and write 
to temp files: "
             + dictionaryValuesTotalTime + "(s)");
     double csvInputStepTime = getCsvInputStepTime(partitionID);
     double generatingDictionaryValuesTime = 
getGeneratingDictionaryValuesTime(partitionID);
-    LOGGER.audit("STAGE 4.1 ->  |_read csv file: " + csvInputStepTime + "(s)");
-    LOGGER.audit("STAGE 4.2 ->  |_transform to surrogate key: "
+    LOGGER.info("STAGE 4.1 ->  |_read csv file: " + csvInputStepTime + "(s)");
+    LOGGER.info("STAGE 4.2 ->  |_transform to surrogate key: "
             + generatingDictionaryValuesTime + "(s)");
   }
 
   private void printSortRowsStepStatisticsInfo(String partitionID) {
     double sortRowsStepTotalTime = getSortRowsStepTotalTime(partitionID);
-    LOGGER.audit("STAGE 4.3 ->  |_sort rows and write to temp file: "
+    LOGGER.info("STAGE 4.3 ->  |_sort rows and write to temp file: "
             + sortRowsStepTotalTime + "(s)");
   }
 
   private void printGenMdkStatisticsInfo(String partitionID) {
     double dictionaryValue2MdkAdd2FileTime = 
getDictionaryValue2MdkAdd2FileTime(partitionID);
-    LOGGER.audit("STAGE 5 ->Transform to MDK, compress and write fact files: "
+    LOGGER.info("STAGE 5 ->Transform to MDK, compress and write fact files: "
             + dictionaryValue2MdkAdd2FileTime + "(s)");
   }
 
   //Print the node blocks information
   private void printHostBlockMapInfo() {
-    LOGGER.audit("========== BLOCK_INFO ==========");
+    LOGGER.info("========== BLOCK_INFO ==========");
     if (getHostBlockMap().size() > 0) {
       for (String host: getHostBlockMap().keySet()) {
-        LOGGER.audit("BLOCK_INFO ->Node host: " + host);
-        LOGGER.audit("BLOCK_INFO ->The block count in this node: " + 
getHostBlockMap().get(host));
+        LOGGER.info("BLOCK_INFO ->Node host: " + host);
+        LOGGER.info("BLOCK_INFO ->The block count in this node: " + 
getHostBlockMap().get(host));
       }
     } else if (getPartitionBlockMap().size() > 0) {
       for (String parID: getPartitionBlockMap().keySet()) {
-        LOGGER.audit("BLOCK_INFO ->Partition ID: " + parID);
-        LOGGER.audit("BLOCK_INFO ->The block count in this partition: " +
+        LOGGER.info("BLOCK_INFO ->Partition ID: " + parID);
+        LOGGER.info("BLOCK_INFO ->The block count in this partition: " +
                 getPartitionBlockMap().get(parID));
       }
     }
@@ -364,21 +364,21 @@ public class CarbonLoadStatisticsImpl implements 
LoadStatistics {
 
   //Print the speed information
   private void printLoadSpeedInfo(String partitionID) {
-    LOGGER.audit("===============Load_Speed_Info===============");
-    LOGGER.audit("Total Num of Records Processed: " + getTotalRecords());
-    LOGGER.audit("Total Time Cost: " + getTotalTime(partitionID) + "(s)");
-    LOGGER.audit("Total Load Speed: " + getLoadSpeed() + "records/s");
-    LOGGER.audit("Generate Dictionaries Speed: " + getGenDicSpeed() + 
"records/s");
-    LOGGER.audit("Read CSV Speed: " + getReadCSVSpeed(partitionID) + " 
records/s");
-    LOGGER.audit("Generate Surrogate Key Speed: " + 
getGenSurKeySpeed(partitionID) + " records/s");
-    LOGGER.audit("Sort Key/Write Temp Files Speed: " + 
getSortKeySpeed(partitionID) + " records/s");
-    LOGGER.audit("MDK Step Speed: " + getMDKSpeed(partitionID) + " records/s");
-    LOGGER.audit("=============================================");
+    LOGGER.info("===============Load_Speed_Info===============");
+    LOGGER.info("Total Num of Records Processed: " + getTotalRecords());
+    LOGGER.info("Total Time Cost: " + getTotalTime(partitionID) + "(s)");
+    LOGGER.info("Total Load Speed: " + getLoadSpeed() + "records/s");
+    LOGGER.info("Generate Dictionaries Speed: " + getGenDicSpeed() + 
"records/s");
+    LOGGER.info("Read CSV Speed: " + getReadCSVSpeed(partitionID) + " 
records/s");
+    LOGGER.info("Generate Surrogate Key Speed: " + 
getGenSurKeySpeed(partitionID) + " records/s");
+    LOGGER.info("Sort Key/Write Temp Files Speed: " + 
getSortKeySpeed(partitionID) + " records/s");
+    LOGGER.info("MDK Step Speed: " + getMDKSpeed(partitionID) + " records/s");
+    LOGGER.info("=============================================");
   }
 
   public void printStatisticsInfo(String partitionID) {
     try {
-      LOGGER.audit("========== TIME_STATISTICS PartitionID: " + partitionID + 
"==========");
+      LOGGER.info("========== TIME_STATISTICS PartitionID: " + partitionID + 
"==========");
       printDicGenStatisticsInfo();
       printLruCacheLoadTimeInfo();
       printDictionaryValuesGenStatisticsInfo(partitionID);
@@ -387,7 +387,7 @@ public class CarbonLoadStatisticsImpl implements 
LoadStatistics {
       printHostBlockMapInfo();
       printLoadSpeedInfo(partitionID);
     } catch (Exception e) {
-      LOGGER.audit("Can't print Statistics Information");
+      LOGGER.error("Can't print Statistics Information");
     } finally {
       resetLoadStatistics();
     }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/873c3ded/core/src/main/java/org/apache/carbondata/core/util/CarbonProperties.java
----------------------------------------------------------------------
diff --git 
a/core/src/main/java/org/apache/carbondata/core/util/CarbonProperties.java 
b/core/src/main/java/org/apache/carbondata/core/util/CarbonProperties.java
index acc266c..5ac359b 100644
--- a/core/src/main/java/org/apache/carbondata/core/util/CarbonProperties.java
+++ b/core/src/main/java/org/apache/carbondata/core/util/CarbonProperties.java
@@ -947,8 +947,10 @@ public final class CarbonProperties {
   public int getNumberOfCores() {
     int numberOfCores;
     try {
-      numberOfCores = Integer.parseInt(CarbonProperties.getInstance()
-          .getProperty(CarbonCommonConstants.NUM_CORES_LOADING));
+      numberOfCores = Integer.parseInt(
+          CarbonProperties.getInstance().getProperty(
+              CarbonCommonConstants.NUM_CORES_LOADING,
+              CarbonCommonConstants.NUM_CORES_DEFAULT_VAL));
     } catch (NumberFormatException exc) {
       LOGGER.error("Configured value for property " + 
CarbonCommonConstants.NUM_CORES_LOADING
           + " is wrong. Falling back to the default value "

http://git-wip-us.apache.org/repos/asf/carbondata/blob/873c3ded/examples/spark2/src/main/scala/org/apache/carbondata/examples/CompareTest.scala
----------------------------------------------------------------------
diff --git 
a/examples/spark2/src/main/scala/org/apache/carbondata/examples/CompareTest.scala
 
b/examples/spark2/src/main/scala/org/apache/carbondata/examples/CompareTest.scala
index 64ccca9..d27b1c4 100644
--- 
a/examples/spark2/src/main/scala/org/apache/carbondata/examples/CompareTest.scala
+++ 
b/examples/spark2/src/main/scala/org/apache/carbondata/examples/CompareTest.scala
@@ -268,16 +268,14 @@ object CompareTest {
   private def loadCarbonTable(spark: SparkSession, input: DataFrame, 
tableName: String): Double = {
     CarbonProperties.getInstance().addProperty(
       CarbonCommonConstants.CARBON_DATA_FILE_VERSION,
-      "3"
+      "V3"
     )
     spark.sql(s"drop table if exists $tableName")
     time {
       input.write
           .format("carbondata")
           .option("tableName", tableName)
-          .option("tempCSV", "false")
           .option("single_pass", "true")
-          .option("dictionary_exclude", "id") // id is high cardinality column
           .option("table_blocksize", "32")
           .mode(SaveMode.Overwrite)
           .save()

http://git-wip-us.apache.org/repos/asf/carbondata/blob/873c3ded/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonInsertIntoCommand.scala
----------------------------------------------------------------------
diff --git 
a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonInsertIntoCommand.scala
 
b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonInsertIntoCommand.scala
index 86d6759..702f954 100644
--- 
a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonInsertIntoCommand.scala
+++ 
b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonInsertIntoCommand.scala
@@ -52,7 +52,7 @@ case class CarbonInsertIntoCommand(
       isPersistEnabledUserValue.equalsIgnoreCase("true") || 
containsLimit(child)
     val df =
       if (isPersistRequired) {
-        LOGGER.audit("Persist enabled for Insert operation")
+        LOGGER.info("Persist enabled for Insert operation")
         Dataset.ofRows(sparkSession, child)
           .persist(StorageLevel.MEMORY_AND_DISK)
       } else {

http://git-wip-us.apache.org/repos/asf/carbondata/blob/873c3ded/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/partition/CarbonAlterTableDropPartitionCommand.scala
----------------------------------------------------------------------
diff --git 
a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/partition/CarbonAlterTableDropPartitionCommand.scala
 
b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/partition/CarbonAlterTableDropPartitionCommand.scala
index 705455f..9c2835e 100644
--- 
a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/partition/CarbonAlterTableDropPartitionCommand.scala
+++ 
b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/partition/CarbonAlterTableDropPartitionCommand.scala
@@ -158,7 +158,6 @@ case class CarbonAlterTableDropPartitionCommand(
       CacheProvider.getInstance().dropAllCache()
       AlterTableUtil.releaseLocks(locks)
       LOGGER.info("Locks released after alter table drop partition action.")
-      LOGGER.audit("Locks released after alter table drop partition action.")
     }
     LOGGER.info(s"Alter table drop partition is successful for table 
$dbName.$tableName")
     LOGGER.audit(s"Alter table drop partition is successful for table 
$dbName.$tableName")

http://git-wip-us.apache.org/repos/asf/carbondata/blob/873c3ded/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/partition/CarbonAlterTableSplitPartitionCommand.scala
----------------------------------------------------------------------
diff --git 
a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/partition/CarbonAlterTableSplitPartitionCommand.scala
 
b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/partition/CarbonAlterTableSplitPartitionCommand.scala
index 99691d2..4b89296 100644
--- 
a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/partition/CarbonAlterTableSplitPartitionCommand.scala
+++ 
b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/partition/CarbonAlterTableSplitPartitionCommand.scala
@@ -167,7 +167,6 @@ case class CarbonAlterTableSplitPartitionCommand(
       CacheProvider.getInstance().dropAllCache()
       val LOGGER: LogService = 
LogServiceFactory.getLogService(this.getClass.getName)
       LOGGER.info("Locks released after alter table add/split partition 
action.")
-      LOGGER.audit("Locks released after alter table add/split partition 
action.")
       if (success) {
         LOGGER.info(s"Alter table add/split partition is successful for table 
$dbName.$tableName")
         LOGGER.audit(s"Alter table add/split partition is successful for table 
$dbName.$tableName")

http://git-wip-us.apache.org/repos/asf/carbondata/blob/873c3ded/integration/spark2/src/main/scala/org/apache/spark/sql/hive/CarbonFileMetastore.scala
----------------------------------------------------------------------
diff --git 
a/integration/spark2/src/main/scala/org/apache/spark/sql/hive/CarbonFileMetastore.scala
 
b/integration/spark2/src/main/scala/org/apache/spark/sql/hive/CarbonFileMetastore.scala
index b3438a4..52bae1a 100644
--- 
a/integration/spark2/src/main/scala/org/apache/spark/sql/hive/CarbonFileMetastore.scala
+++ 
b/integration/spark2/src/main/scala/org/apache/spark/sql/hive/CarbonFileMetastore.scala
@@ -487,7 +487,7 @@ class CarbonFileMetastore extends CarbonMetaStore {
   private def touchSchemaFileSystemTime(): Long = {
     val (timestampFile, timestampFileType) = getTimestampFileAndType()
     if (!FileFactory.isFileExist(timestampFile, timestampFileType)) {
-      LOGGER.audit(s"Creating timestamp file for $timestampFile")
+      LOGGER.info(s"Creating timestamp file for $timestampFile")
       FileFactory
         .createNewFile(timestampFile,
           timestampFileType,

http://git-wip-us.apache.org/repos/asf/carbondata/blob/873c3ded/integration/spark2/src/main/scala/org/apache/spark/sql/hive/CarbonHiveMetadataUtil.scala
----------------------------------------------------------------------
diff --git 
a/integration/spark2/src/main/scala/org/apache/spark/sql/hive/CarbonHiveMetadataUtil.scala
 
b/integration/spark2/src/main/scala/org/apache/spark/sql/hive/CarbonHiveMetadataUtil.scala
index d67ae6b..dfa9904 100644
--- 
a/integration/spark2/src/main/scala/org/apache/spark/sql/hive/CarbonHiveMetadataUtil.scala
+++ 
b/integration/spark2/src/main/scala/org/apache/spark/sql/hive/CarbonHiveMetadataUtil.scala
@@ -46,7 +46,7 @@ object CarbonHiveMetadataUtil {
       sparkSession.sessionState.catalog.dropTable(tabelIdentifier, true, false)
     } catch {
       case e: Exception =>
-        LOGGER.audit(
+        LOGGER.error(
           s"Error While deleting the table $databaseName.$tableName during 
drop carbon table" +
           e.getMessage)
     }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/873c3ded/integration/spark2/src/main/scala/org/apache/spark/sql/hive/CarbonMetaStore.scala
----------------------------------------------------------------------
diff --git 
a/integration/spark2/src/main/scala/org/apache/spark/sql/hive/CarbonMetaStore.scala
 
b/integration/spark2/src/main/scala/org/apache/spark/sql/hive/CarbonMetaStore.scala
index 3f1ebbc..9127f14 100644
--- 
a/integration/spark2/src/main/scala/org/apache/spark/sql/hive/CarbonMetaStore.scala
+++ 
b/integration/spark2/src/main/scala/org/apache/spark/sql/hive/CarbonMetaStore.scala
@@ -184,10 +184,10 @@ object CarbonMetaStoreFactory {
   def createCarbonMetaStore(conf: RuntimeConfig): CarbonMetaStore = {
     val readSchemaFromHiveMetaStore = readSchemaFromHive(conf)
     if (readSchemaFromHiveMetaStore) {
-      LOGGER.audit("Hive based carbon metastore is enabled")
+      LOGGER.info("Hive based carbon metastore is enabled")
       new CarbonHiveMetaStore()
     } else {
-      LOGGER.audit("File based carbon metastore is enabled")
+      LOGGER.info("File based carbon metastore is enabled")
       new CarbonFileMetastore()
     }
   }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/873c3ded/processing/src/main/java/org/apache/carbondata/processing/loading/sort/unsafe/holder/UnsafeFinalMergePageHolder.java
----------------------------------------------------------------------
diff --git 
a/processing/src/main/java/org/apache/carbondata/processing/loading/sort/unsafe/holder/UnsafeFinalMergePageHolder.java
 
b/processing/src/main/java/org/apache/carbondata/processing/loading/sort/unsafe/holder/UnsafeFinalMergePageHolder.java
index a776db1..c966ff2 100644
--- 
a/processing/src/main/java/org/apache/carbondata/processing/loading/sort/unsafe/holder/UnsafeFinalMergePageHolder.java
+++ 
b/processing/src/main/java/org/apache/carbondata/processing/loading/sort/unsafe/holder/UnsafeFinalMergePageHolder.java
@@ -49,7 +49,7 @@ public class UnsafeFinalMergePageHolder implements 
SortTempChunkHolder {
     this.mergedAddresses = merger.getMergedAddresses();
     this.rowPageIndexes = merger.getRowPageIndexes();
     this.rowPages = merger.getUnsafeCarbonRowPages();
-    LOGGER.audit("Processing unsafe inmemory rows page with size : " + 
actualSize);
+    LOGGER.info("Processing unsafe inmemory rows page with size : " + 
actualSize);
     this.comparator = new 
IntermediateSortTempRowComparator(noDictSortColumnMapping);
   }
 

http://git-wip-us.apache.org/repos/asf/carbondata/blob/873c3ded/processing/src/main/java/org/apache/carbondata/processing/loading/sort/unsafe/holder/UnsafeInmemoryHolder.java
----------------------------------------------------------------------
diff --git 
a/processing/src/main/java/org/apache/carbondata/processing/loading/sort/unsafe/holder/UnsafeInmemoryHolder.java
 
b/processing/src/main/java/org/apache/carbondata/processing/loading/sort/unsafe/holder/UnsafeInmemoryHolder.java
index cbcbbae..8b4b550 100644
--- 
a/processing/src/main/java/org/apache/carbondata/processing/loading/sort/unsafe/holder/UnsafeInmemoryHolder.java
+++ 
b/processing/src/main/java/org/apache/carbondata/processing/loading/sort/unsafe/holder/UnsafeInmemoryHolder.java
@@ -43,7 +43,7 @@ public class UnsafeInmemoryHolder implements 
SortTempChunkHolder {
   public UnsafeInmemoryHolder(UnsafeCarbonRowPage rowPage) {
     this.actualSize = rowPage.getBuffer().getActualSize();
     this.rowPage = rowPage;
-    LOGGER.audit("Processing unsafe inmemory rows page with size : " + 
actualSize);
+    LOGGER.info("Processing unsafe inmemory rows page with size : " + 
actualSize);
     this.comparator = new IntermediateSortTempRowComparator(
         rowPage.getTableFieldStat().getIsSortColNoDictFlags());
   }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/873c3ded/processing/src/main/java/org/apache/carbondata/processing/loading/sort/unsafe/holder/UnsafeInmemoryMergeHolder.java
----------------------------------------------------------------------
diff --git 
a/processing/src/main/java/org/apache/carbondata/processing/loading/sort/unsafe/holder/UnsafeInmemoryMergeHolder.java
 
b/processing/src/main/java/org/apache/carbondata/processing/loading/sort/unsafe/holder/UnsafeInmemoryMergeHolder.java
index 3b9d8d7..f8689d9 100644
--- 
a/processing/src/main/java/org/apache/carbondata/processing/loading/sort/unsafe/holder/UnsafeInmemoryMergeHolder.java
+++ 
b/processing/src/main/java/org/apache/carbondata/processing/loading/sort/unsafe/holder/UnsafeInmemoryMergeHolder.java
@@ -48,7 +48,7 @@ public class UnsafeInmemoryMergeHolder implements 
Comparable<UnsafeInmemoryMerge
   public UnsafeInmemoryMergeHolder(UnsafeCarbonRowPage rowPage, byte index) {
     this.actualSize = rowPage.getBuffer().getActualSize();
     this.rowPage = rowPage;
-    LOGGER.audit("Processing unsafe inmemory rows page with size : " + 
actualSize);
+    LOGGER.info("Processing unsafe inmemory rows page with size : " + 
actualSize);
     this.comparator = new UnsafeRowComparator(rowPage);
     this.baseObject = rowPage.getDataBlock().getBaseObject();
     currentRow = new UnsafeCarbonRowForMerge();

http://git-wip-us.apache.org/repos/asf/carbondata/blob/873c3ded/processing/src/main/java/org/apache/carbondata/processing/loading/sort/unsafe/holder/UnsafeSortTempFileChunkHolder.java
----------------------------------------------------------------------
diff --git 
a/processing/src/main/java/org/apache/carbondata/processing/loading/sort/unsafe/holder/UnsafeSortTempFileChunkHolder.java
 
b/processing/src/main/java/org/apache/carbondata/processing/loading/sort/unsafe/holder/UnsafeSortTempFileChunkHolder.java
index 527452a..86b7ac8 100644
--- 
a/processing/src/main/java/org/apache/carbondata/processing/loading/sort/unsafe/holder/UnsafeSortTempFileChunkHolder.java
+++ 
b/processing/src/main/java/org/apache/carbondata/processing/loading/sort/unsafe/holder/UnsafeSortTempFileChunkHolder.java
@@ -131,7 +131,7 @@ public class UnsafeSortTempFileChunkHolder implements 
SortTempChunkHolder {
       stream = FileFactory.getDataInputStream(tempFile.getPath(), 
FileFactory.FileType.LOCAL,
           readBufferSize, compressorName);
       this.entryCount = stream.readInt();
-      LOGGER.audit("Processing unsafe mode file rows with size : " + 
entryCount);
+      LOGGER.info("Processing unsafe mode file rows with size : " + 
entryCount);
       if (prefetch) {
         new DataFetcher(false).call();
         totalRecordFetch += currentBuffer.length;

http://git-wip-us.apache.org/repos/asf/carbondata/blob/873c3ded/processing/src/main/java/org/apache/carbondata/processing/util/CarbonDataProcessorUtil.java
----------------------------------------------------------------------
diff --git 
a/processing/src/main/java/org/apache/carbondata/processing/util/CarbonDataProcessorUtil.java
 
b/processing/src/main/java/org/apache/carbondata/processing/util/CarbonDataProcessorUtil.java
index ba2b0c2..392ad59 100644
--- 
a/processing/src/main/java/org/apache/carbondata/processing/util/CarbonDataProcessorUtil.java
+++ 
b/processing/src/main/java/org/apache/carbondata/processing/util/CarbonDataProcessorUtil.java
@@ -110,8 +110,13 @@ public final class CarbonDataProcessorUtil {
    */
   public static void createLocations(String[] locations) {
     for (String loc : locations) {
-      if (!new File(loc).mkdirs()) {
-        LOGGER.warn("Error occurs while creating dirs: " + loc);
+      File dir = new File(loc);
+      if (dir.exists()) {
+        LOGGER.warn("dir already exists, skip dir creation: " + loc);
+      } else {
+        if (!dir.mkdirs()) {
+          LOGGER.error("Error occurs while creating dir: " + loc);
+        }
       }
     }
   }
@@ -433,7 +438,7 @@ public final class CarbonDataProcessorUtil {
             
configuration.getDataLoadProperty(CarbonCommonConstants.LOAD_SORT_SCOPE)
                 .toString());
       }
-      LOGGER.warn("sort scope is set to " + sortScope);
+      LOGGER.info("sort scope is set to " + sortScope);
     } catch (Exception e) {
       sortScope = 
SortScopeOptions.getSortScope(CarbonCommonConstants.LOAD_SORT_SCOPE_DEFAULT);
       LOGGER.warn("Exception occured while resolving sort scope. " +
@@ -453,7 +458,7 @@ public final class CarbonDataProcessorUtil {
       } else {
         sortScope = SortScopeOptions.getSortScope(sortScopeString);
       }
-      LOGGER.warn("sort scope is set to " + sortScope);
+      LOGGER.info("sort scope is set to " + sortScope);
     } catch (Exception e) {
       sortScope = 
SortScopeOptions.getSortScope(CarbonCommonConstants.LOAD_SORT_SCOPE_DEFAULT);
       LOGGER.warn("Exception occured while resolving sort scope. " +
@@ -481,7 +486,7 @@ public final class CarbonDataProcessorUtil {
             
configuration.getDataLoadProperty(CarbonCommonConstants.LOAD_BATCH_SORT_SIZE_INMB)
                 .toString());
       }
-      LOGGER.warn("batch sort size is set to " + batchSortSizeInMb);
+      LOGGER.info("batch sort size is set to " + batchSortSizeInMb);
     } catch (Exception e) {
       batchSortSizeInMb = 0;
       LOGGER.warn("Exception occured while resolving batch sort size. " +

http://git-wip-us.apache.org/repos/asf/carbondata/blob/873c3ded/streaming/src/main/scala/org/apache/carbondata/streaming/StreamHandoffRDD.scala
----------------------------------------------------------------------
diff --git 
a/streaming/src/main/scala/org/apache/carbondata/streaming/StreamHandoffRDD.scala
 
b/streaming/src/main/scala/org/apache/carbondata/streaming/StreamHandoffRDD.scala
index 47ef5f2..0a95c24 100644
--- 
a/streaming/src/main/scala/org/apache/carbondata/streaming/StreamHandoffRDD.scala
+++ 
b/streaming/src/main/scala/org/apache/carbondata/streaming/StreamHandoffRDD.scala
@@ -351,7 +351,7 @@ object StreamHandoffRDD {
       LOGGER.info("********clean up done**********")
       LOGGER.audit(s"Handoff is failed for " +
                    s"${ carbonLoadModel.getDatabaseName }.${ 
carbonLoadModel.getTableName }")
-      LOGGER.warn("Cannot write load metadata file as handoff failed")
+      LOGGER.error("Cannot write load metadata file as handoff failed")
       throw new Exception(errorMessage)
     }
 

Reply via email to the mailing list.