carbondata git commit: [CARBONDATA-2317] Concurrent datamap with same name and schema creation throws exception

2018-04-10 Thread manishgupta88
Repository: carbondata
Updated Branches:
  refs/heads/master 9ca9b6d0c -> 4cbd5cdf2


[CARBONDATA-2317] Concurrent datamap with same name and schema creation throws exception

Concurrent datamap with same name and schema creation throws exception

This closes #2143
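
The gist of the fix, as a hedged sketch (the helper below is illustrative, not the exact patch): when several sessions race to create the same datamap with an identical schema, the losing sessions should treat the resulting "already exists" failure as success instead of surfacing an exception.

    import org.apache.spark.sql.SparkSession
    import org.apache.spark.sql.catalyst.analysis.TableAlreadyExistsException

    // Hypothetical helper: run a CREATE statement and swallow a concurrent
    // "already exists" failure when IF NOT EXISTS semantics apply, since the
    // winning session has already created an identical object.
    def createTolerantOfRace(spark: SparkSession, ddl: String, ifNotExistsSet: Boolean): Unit = {
      try {
        spark.sql(ddl)
      } catch {
        case _: TableAlreadyExistsException if ifNotExistsSet =>
          // Another session won the race; the create is effectively a no-op.
      }
    }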


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/4cbd5cdf
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/4cbd5cdf
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/4cbd5cdf

Branch: refs/heads/master
Commit: 4cbd5cdf20341f1ae07c7cbb8275c1e333856cae
Parents: 9ca9b6d
Author: rahulforallp 
Authored: Fri Apr 6 15:17:54 2018 +0530
Committer: manishgupta88 
Committed: Wed Apr 11 11:15:15 2018 +0530

--
 .../preaggregate/TestPreAggCreateCommand.scala  | 44 
 .../table/CarbonCreateTableCommand.scala| 31 +++---
 2 files changed, 61 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/carbondata/blob/4cbd5cdf/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/preaggregate/TestPreAggCreateCommand.scala
--
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/preaggregate/TestPreAggCreateCommand.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/preaggregate/TestPreAggCreateCommand.scala
index e546fe8..7cb1adf 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/preaggregate/TestPreAggCreateCommand.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/preaggregate/TestPreAggCreateCommand.scala
@@ -17,7 +17,14 @@
 
 package org.apache.carbondata.integration.spark.testsuite.preaggregate
 
+import java.util
+import java.util.concurrent.{Callable, ExecutorService, Executors, TimeUnit}
+
 import scala.collection.JavaConverters._
+import scala.concurrent.ExecutionContext.Implicits.global
+import scala.concurrent.{Await, Future}
+import scala.concurrent.duration.Duration
+import scala.util.{Failure, Success}
 
 import org.apache.spark.sql.{AnalysisException, CarbonDatasourceHadoopRelation, Row}
 import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
@@ -429,6 +436,43 @@ class TestPreAggCreateCommand extends QueryTest with BeforeAndAfterAll {
 }
   }
 
+  test("test creation of multiple preaggregate of same name concurrently ") {
+    sql("DROP TABLE IF EXISTS tbl_concurr")
+    sql(
+      "create table if not exists  tbl_concurr(imei string,age int,mac string ,prodate timestamp," +
+      "update timestamp,gamepoint double,contrid double) stored by 'carbondata' ")
+
+    var executorService: ExecutorService = Executors.newCachedThreadPool()
+    val tasks = new util.ArrayList[Callable[String]]()
+    var i = 0
+    val count = 5
+    while (i < count) {
+      tasks
+        .add(new QueryTask(
+          s"""create datamap agg_concu1 on table tbl_concurr using
+             |'preaggregate' as select prodate, mac from tbl_concurr group by prodate,mac"""
+            .stripMargin))
+      i = i + 1
+    }
+    executorService.invokeAll(tasks)
+
+    checkExistence(sql("show tables"), true, "agg_concu1", "tbl_concurr")
+    executorService.shutdown()
+  }
+
+  class QueryTask(query: String) extends Callable[String] {
+    override def call(): String = {
+      var result = "SUCCESS"
+      try {
+        sql(query).collect()
+      } catch {
+        case exception: Exception => LOGGER.error(exception.getMessage)
+      }
+      result
+    }
+  }
+
+
   def getCarbonTable(plan: LogicalPlan) : CarbonTable ={
 var carbonTable : CarbonTable = null
 plan.transform {

http://git-wip-us.apache.org/repos/asf/carbondata/blob/4cbd5cdf/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/table/CarbonCreateTableCommand.scala
--
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/table/CarbonCreateTableCommand.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/table/CarbonCreateTableCommand.scala
index 65c6269..6266c53 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/table/CarbonCreateTableCommand.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/table/CarbonCreateTableCommand.scala
@@ -121,21 +121,24 @@ case class CarbonCreateTableCommand(
       // isVisible property is added to hive table properties to differentiate between main
   // 

carbondata git commit: [CARBONDATA-2257] Added SDV test cases for Partition with Global Sort

2018-04-10 Thread manishgupta88
Repository: carbondata
Updated Branches:
  refs/heads/master 8a5369d2b -> 9ca9b6d0c


[CARBONDATA-2257] Added SDV test cases for Partition with Global Sort

Added SDV test cases for Partition with Global Sort

This closes #2066
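
Condensed from the new suite below, the load-and-verify pattern these tests exercise (run inside the suite's QueryTest context; table and CSV names are the suite's own):

    // Create a partitioned table with global sort scope, load data, verify row count.
    sql("""CREATE TABLE partition_table(shortField SHORT, intField INT) PARTITIONED BY (stringField STRING) STORED BY 'carbondata' TBLPROPERTIES('SORT_SCOPE'='GLOBAL_SORT')""")
    sql(s"""load data inpath '$resourcesPath/Data/partition/list_partition_table.csv' into table partition_table""")
    checkAnswer(sql("select count(*) from partition_table"), Seq(Row(11)))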


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/9ca9b6d0
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/9ca9b6d0
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/9ca9b6d0

Branch: refs/heads/master
Commit: 9ca9b6d0cc19444bd6a5c7826bad0c97584712e9
Parents: 8a5369d
Author: praveenmeenakshi56 
Authored: Wed Mar 14 20:29:11 2018 +0530
Committer: manishgupta88 
Committed: Wed Apr 11 11:06:51 2018 +0530

--
 .../generated/TestPartitionWithGlobalSort.scala | 282 +++
 .../cluster/sdv/suite/SDVSuites.scala   |   7 +-
 2 files changed, 287 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/carbondata/blob/9ca9b6d0/integration/spark-common-cluster-test/src/test/scala/org/apache/carbondata/cluster/sdv/generated/TestPartitionWithGlobalSort.scala
--
diff --git a/integration/spark-common-cluster-test/src/test/scala/org/apache/carbondata/cluster/sdv/generated/TestPartitionWithGlobalSort.scala b/integration/spark-common-cluster-test/src/test/scala/org/apache/carbondata/cluster/sdv/generated/TestPartitionWithGlobalSort.scala
new file mode 100644
index 000..31ce5fa
--- /dev/null
+++ b/integration/spark-common-cluster-test/src/test/scala/org/apache/carbondata/cluster/sdv/generated/TestPartitionWithGlobalSort.scala
@@ -0,0 +1,282 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.carbondata.cluster.sdv.generated
+
+import org.apache.carbondata.core.constants.CarbonCommonConstants
+import org.apache.carbondata.core.util.CarbonProperties
+import org.apache.spark.sql.Row
+import org.apache.spark.sql.common.util._
+import org.scalatest.BeforeAndAfterAll
+
+/**
+  * Test Class for partitionTestCase to verify all scenarios on Partition with Global Sort
+  */
+
+class TestPartitionWithGlobalSort extends QueryTest with BeforeAndAfterAll {
+
+  override def beforeAll = {
+    CarbonProperties.getInstance()
+      .addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "yyyy/MM/dd HH:mm:ss")
+      .addProperty(CarbonCommonConstants.CARBON_DATE_FORMAT, "yyyy/MM/dd")
+  }
+
+  //Loading data into Partitioned table with Global Sort
+  test("Partition-Global-Sort_TC001", Include) {
+    sql(s"""drop table if exists partition_table""")
+    sql(s"""CREATE TABLE partition_table(shortField SHORT, intField INT, bigintField LONG, doubleField DOUBLE, timestamp TIMESTAMP, decimalField DECIMAL(18,2),dateField DATE, charField CHAR(5), floatField FLOAT ) PARTITIONED BY (stringField STRING) STORED BY 'carbondata' TBLPROPERTIES('SORT_SCOPE'='GLOBAL_SORT')""")
+    sql(s"""load data inpath '$resourcesPath/Data/partition/list_partition_table.csv' into table partition_table options('FILEHEADER'='shortfield,intfield,bigintfield,doublefield,stringfield,timestamp,decimalfield,datefield,charfield,floatfield')""")
+    checkAnswer(sql(s"""select count(*) from partition_table"""), Seq(Row(11)))
+    sql(s"""drop table if exists partition_table""")
+  }
+
+  //Verify Exception when Loading data into a Partitioned table with Global Sort and Bad Records Action = FAIL
+  test("Partition-Global-Sort_TC002", Include) {
+    sql(s"""drop table if exists partition_table""")
+    sql(s"""CREATE TABLE partition_table(shortField SHORT, intField INT, bigintField LONG, doubleField DOUBLE, timestamp TIMESTAMP, decimalField DECIMAL(18,2),dateField DATE, charField CHAR(5), floatField FLOAT ) PARTITIONED BY (stringField STRING) STORED BY 'carbondata' TBLPROPERTIES('SORT_SCOPE'='GLOBAL_SORT')""")
+    intercept[Exception] {
+      sql(s"""load data inpath '$resourcesPath/Data/partition/list_partition_table.csv' into table partition_table 
[1/2] carbondata git commit: [CARBONDATA-2310] Refactored code to improve Distributable interface

2018-04-10 Thread manishgupta88
Repository: carbondata
Updated Branches:
  refs/heads/branch-1.3 31c7b505a -> 3c48df396


http://git-wip-us.apache.org/repos/asf/carbondata/blob/3c48df39/hadoop/src/main/java/org/apache/carbondata/hadoop/api/CarbonTableInputFormat.java
--
diff --git a/hadoop/src/main/java/org/apache/carbondata/hadoop/api/CarbonTableInputFormat.java b/hadoop/src/main/java/org/apache/carbondata/hadoop/api/CarbonTableInputFormat.java
index 094b8c3..62d24b7 100644
--- a/hadoop/src/main/java/org/apache/carbondata/hadoop/api/CarbonTableInputFormat.java
+++ b/hadoop/src/main/java/org/apache/carbondata/hadoop/api/CarbonTableInputFormat.java
@@ -31,6 +31,8 @@ import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
 
+import org.apache.carbondata.common.logging.LogService;
+import org.apache.carbondata.common.logging.LogServiceFactory;
 import org.apache.carbondata.core.constants.CarbonCommonConstants;
 import org.apache.carbondata.core.datamap.DataMapStoreManager;
 import org.apache.carbondata.core.datamap.Segment;
@@ -132,6 +134,12 @@ public class CarbonTableInputFormat<T> extends FileInputFormat<Void, T> {
   public static final String UPADTE_T =
       "mapreduce.input.carboninputformat.partitions.to.prune";
 
+  /**
+   * Attribute for Carbon LOGGER.
+   */
+  private static final LogService LOGGER =
+      LogServiceFactory.getLogService(CarbonProperties.class.getName());
+
   // a cache for carbon table, it will be used in task side
   private CarbonTable carbonTable;
 
@@ -192,7 +200,7 @@ public class CarbonTableInputFormat<T> extends FileInputFormat<Void, T> {
   }
 
 
-  public static void setDataMapJob(Configuration configuration, DataMapJob dataMapJob)
+  public static void setDataMapJob(Configuration configuration, Object dataMapJob)
       throws IOException {
     if (dataMapJob != null) {
       String toString = ObjectSerializationUtil.convertObjectToString(dataMapJob);
@@ -200,7 +208,7 @@ public class CarbonTableInputFormat<T> extends FileInputFormat<Void, T> {
 }
   }
 
-  private static DataMapJob getDataMapJob(Configuration configuration) throws IOException {
+  public static DataMapJob getDataMapJob(Configuration configuration) throws IOException {
     String jobString = configuration.get(DATA_MAP_DSTR);
     if (jobString != null) {
       return (DataMapJob) ObjectSerializationUtil.convertStringToObject(jobString);
@@ -760,11 +768,9 @@ public class CarbonTableInputFormat<T> extends FileInputFormat<Void, T> {
     List<PartitionSpec> partitionsToPrune = getPartitionsToPrune(job.getConfiguration());
     List<ExtendedBlocklet> prunedBlocklets;
     if (dataMapJob != null) {
-      DistributableDataMapFormat datamapDstr =
-          new DistributableDataMapFormat(absoluteTableIdentifier, BlockletDataMap.NAME,
-              segmentIds, partitionsToPrune,
-              BlockletDataMapFactory.class.getName());
-      prunedBlocklets = dataMapJob.execute(datamapDstr, resolver);
+      prunedBlocklets =
+          getExtendedBlocklets(job, absoluteTableIdentifier, resolver, segmentIds, blockletMap,
+              dataMapJob, partitionsToPrune);
     } else {
       prunedBlocklets = blockletMap.prune(segmentIds, resolver, partitionsToPrune);
     }
@@ -809,6 +815,23 @@ public class CarbonTableInputFormat<T> extends FileInputFormat<Void, T> {
     return resultFilterredBlocks;
   }
 
+  public List<ExtendedBlocklet> getExtendedBlocklets(JobContext job,
+      AbsoluteTableIdentifier absoluteTableIdentifier, FilterResolverIntf resolver,
+      List<Segment> segmentIds, TableDataMap blockletMap, DataMapJob dataMapJob,
+      List<PartitionSpec> partitionsToPrune) {
+    List<ExtendedBlocklet> prunedBlocklets = new ArrayList<>();
+    boolean distributedDataMaps = Boolean.parseBoolean(CarbonProperties.getInstance()
+        .getProperty(CarbonCommonConstants.USE_DISTRIBUTED_DATAMAP,
+            CarbonCommonConstants.USE_DISTRIBUTED_DATAMAP_DEFAULT));
+    if (distributedDataMaps) {
+      String className = "org.apache.carbondata.hadoop.api.DistributableDataMapFormat";
+      FileInputFormat dataMapFormat =
+          createDataMapJob(absoluteTableIdentifier, segmentIds, partitionsToPrune, className);
+      prunedBlocklets = dataMapJob.execute((DistributableDataMapFormat) dataMapFormat, resolver);
+    }
+    return prunedBlocklets;
+  }
+
   private CarbonInputSplit convertToCarbonInputSplit(ExtendedBlocklet blocklet) throws IOException {
     org.apache.carbondata.hadoop.CarbonInputSplit split =
         org.apache.carbondata.hadoop.CarbonInputSplit.from(blocklet.getSegmentId(),
@@ -819,6 +842,19 @@ public class CarbonTableInputFormat<T> extends FileInputFormat<Void, T> {
 return split;
   }
 
+  public static FileInputFormat createDataMapJob(AbsoluteTableIdentifier absoluteTableIdentifier,
+      List<Segment> segments, List<PartitionSpec> partitionsToPrune, String clsName) {
+    try {
+      Constructor cons = Class.forName(clsName).getDeclaredConstructors()[0];
+      return (FileInputFormat) cons

[2/2] carbondata git commit: [CARBONDATA-2310] Refactored code to improve Distributable interface

2018-04-10 Thread manishgupta88
[CARBONDATA-2310] Refactored code to improve Distributable interface

Refactored code to improve Distributable interface

This closes #2134
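
At a high level the refactor routes blocklet pruning through a pluggable DataMapJob. A minimal sketch of that dispatch, with simplified stand-in names (the real code reads CarbonCommonConstants.USE_DISTRIBUTED_DATAMAP and constructs a DistributableDataMapFormat reflectively, as shown in part [1/2]):

    // If distributed datamaps are enabled, delegate pruning to a cluster job;
    // otherwise prune locally in the driver.
    def pruneBlocklets[B](distributedEnabled: Boolean,
        runDistributed: () => Seq[B],
        runLocal: () => Seq[B]): Seq[B] = {
      if (distributedEnabled) runDistributed() else runLocal()
    }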


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/3c48df39
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/3c48df39
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/3c48df39

Branch: refs/heads/branch-1.3
Commit: 3c48df396f2bafc9efc8091fc7abefca089922d7
Parents: 31c7b50
Author: dhatchayani 
Authored: Tue Apr 3 11:19:43 2018 +0530
Committer: manishgupta88 
Committed: Tue Apr 10 16:16:27 2018 +0530

--
 .../org/apache/carbondata/core/cache/Cache.java |  10 ++
 .../dictionary/AbstractDictionaryCache.java |   4 +
 .../core/constants/CarbonCommonConstants.java   |   3 +
 .../core/datamap/dev/CacheableDataMap.java  |  47 ++
 .../carbondata/core/datamap/dev/DataMap.java|   3 +-
 .../core/datastore/BlockIndexStore.java |   4 +
 .../core/datastore/SegmentTaskIndexStore.java   |   4 +
 .../core/indexstore/AbstractMemoryDMStore.java  |  63 +++
 .../indexstore/BlockletDataMapIndexStore.java   |  92 --
 .../core/indexstore/SafeMemoryDMStore.java  |  94 +++
 .../TableBlockIndexUniqueIdentifier.java|   3 +-
 .../core/indexstore/UnsafeMemoryDMStore.java|  23 +--
 .../blockletindex/BlockletDataMap.java  | 169 +--
 .../BlockletDataMapDistributable.java   |  18 +-
 .../blockletindex/BlockletDataMapFactory.java   |  95 +++
 .../blockletindex/BlockletDataMapModel.java |  13 ++
 .../core/indexstore/row/DataMapRow.java |  13 +-
 .../core/indexstore/row/UnsafeDataMapRow.java   |   7 +-
 .../core/indexstore/schema/CarbonRowSchema.java |   4 +-
 .../core/util/BlockletDataMapUtil.java  | 140 +++
 .../carbondata/core/util/SessionParams.java |   5 +
 .../TestBlockletDataMapFactory.java | 108 
 .../apache/carbondata/hadoop/CacheClient.java   |  43 +
 .../hadoop/api/AbstractDataMapJob.java  |  43 +
 .../hadoop/api/CarbonTableInputFormat.java  |  61 ++-
 .../carbondata/hadoop/api/DataMapJob.java   |   6 +
 .../hadoop/util/CarbonInputFormatUtil.java  |  44 +
 .../carbondata/spark/rdd/CarbonScanRDD.scala|   9 +-
 .../carbondata/spark/rdd/SparkDataMapJob.scala  |   4 +-
 .../org/apache/spark/sql/CarbonCountStar.scala  |  13 ++
 .../execution/command/CarbonHiveCommands.scala  |   9 +
 31 files changed, 973 insertions(+), 181 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/carbondata/blob/3c48df39/core/src/main/java/org/apache/carbondata/core/cache/Cache.java
--
diff --git a/core/src/main/java/org/apache/carbondata/core/cache/Cache.java b/core/src/main/java/org/apache/carbondata/core/cache/Cache.java
index 04fa18a..6df36fc 100644
--- a/core/src/main/java/org/apache/carbondata/core/cache/Cache.java
+++ b/core/src/main/java/org/apache/carbondata/core/cache/Cache.java
@@ -20,6 +20,8 @@ package org.apache.carbondata.core.cache;
 import java.io.IOException;
 import java.util.List;
 
+import org.apache.carbondata.core.memory.MemoryException;
+
 /**
  * A semi-persistent mapping from keys to values. Cache entries are manually added using
  * #get(Key), #getAll(List), and are stored in the cache until
@@ -69,6 +71,14 @@ public interface Cache<K, V> {
   void invalidate(K key);
 
   /**
+   * This method will add the value to the cache for the given key
+   *
+   * @param key
+   * @param value
+   */
+  void put(K key, V value) throws IOException, MemoryException;
+
+  /**
    * Access count of Cacheable entry will be decremented
    *
    * @param keys
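
A standalone sketch of the semantics the new put contract adds (map-backed and hypothetical; in the patch the interface is implemented by the concrete cache stores listed in the file summary above):

    import java.util.concurrent.ConcurrentHashMap

    // A cache entry can now be registered manually via put, so a later
    // get(key) hits the cache instead of rebuilding the value.
    class MapBackedCache[K, V] {
      private val store = new ConcurrentHashMap[K, V]()
      def put(key: K, value: V): Unit = store.put(key, value)
      def get(key: K): Option[V] = Option(store.get(key))
      def invalidate(key: K): Unit = store.remove(key)
    }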

http://git-wip-us.apache.org/repos/asf/carbondata/blob/3c48df39/core/src/main/java/org/apache/carbondata/core/cache/dictionary/AbstractDictionaryCache.java
--
diff --git a/core/src/main/java/org/apache/carbondata/core/cache/dictionary/AbstractDictionaryCache.java b/core/src/main/java/org/apache/carbondata/core/cache/dictionary/AbstractDictionaryCache.java
index 598d00e..9ed9007 100644
--- a/core/src/main/java/org/apache/carbondata/core/cache/dictionary/AbstractDictionaryCache.java
+++ b/core/src/main/java/org/apache/carbondata/core/cache/dictionary/AbstractDictionaryCache.java
@@ -59,6 +59,10 @@ public abstract class AbstractDictionaryCache

http://git-wip-us.apache.org/repos/asf/carbondata/blob/3c48df39/core/src/main/java/org/apache/carbondata/core/constants/CarbonCommonConstants.java
--
diff --git 

Jenkins build became unstable: carbondata-master-spark-2.2 #275

2018-04-10 Thread Apache Jenkins Server


Jenkins build became unstable: carbondata-master-spark-2.2 » Apache CarbonData :: Spark Common Test #275

2018-04-10 Thread Apache Jenkins Server


Jenkins build became unstable: carbondata-master-spark-2.1 » Apache CarbonData :: Spark Common Test #2235

2018-04-10 Thread Apache Jenkins Server


Jenkins build became unstable: carbondata-master-spark-2.1 #2235

2018-04-10 Thread Apache Jenkins Server

carbondata git commit: [CARBONDATA-2326][Statistics] Fix NPE of statistics when spark.sql.execution.id is null

2018-04-10 Thread kumarvishal09
Repository: carbondata
Updated Branches:
  refs/heads/master cfb9a9a20 -> 8a5369d2b


[CARBONDATA-2326][Statistics] Fix NPE of statistics when spark.sql.execution.id is null

This closes #2151
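
The defect and the guard, as a minimal sketch (the helper name is illustrative): SparkContext.getLocalProperty returns null when the scan runs outside a tracked SQL execution, so the id must be null-checked before calling .toLong on it.

    import org.apache.spark.sql.SparkSession
    import org.apache.spark.sql.execution.SQLExecution

    // Returns the current SQL execution id, or None when the query is not
    // running under a tracked execution (the case that caused the NPE).
    def currentExecutionId(spark: SparkSession): Option[Long] =
      Option(spark.sparkContext.getLocalProperty(SQLExecution.EXECUTION_ID_KEY))
        .map(_.toLong)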


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/8a5369d2
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/8a5369d2
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/8a5369d2

Branch: refs/heads/master
Commit: 8a5369d2b4fa889583f25a39dc88a343c92c2260
Parents: cfb9a9a
Author: QiangCai 
Authored: Tue Apr 10 10:07:53 2018 +0800
Committer: kumarvishal 
Committed: Tue Apr 10 15:38:35 2018 +0800

--
 .../carbondata/spark/rdd/CarbonScanRDD.scala| 44 ++--
 1 file changed, 23 insertions(+), 21 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/carbondata/blob/8a5369d2/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/CarbonScanRDD.scala
--
diff --git a/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/CarbonScanRDD.scala b/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/CarbonScanRDD.scala
index 256e43d..df953da 100644
--- a/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/CarbonScanRDD.scala
+++ b/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/CarbonScanRDD.scala
@@ -174,27 +174,29 @@ class CarbonScanRDD(
     } finally {
       Profiler.invokeIfEnable {
         val endTime = System.currentTimeMillis()
-        val executionId = spark.sparkContext.getLocalProperty(SQLExecution.EXECUTION_ID_KEY).toLong
-        Profiler.send(
-          GetPartition(
-            executionId,
-            tableInfo.getDatabaseName + "." + tableInfo.getFactTable.getTableName,
-            tablePath,
-            queryId,
-            partitions.length,
-            startTime,
-            endTime,
-            getSplitsStartTime,
-            getSplitsEndTime,
-            numSegments,
-            numStreamSegments,
-            numBlocks,
-            distributeStartTime,
-            distributeEndTime,
-            if (filterExpression == null) "" else filterExpression.getStatement,
-            if (columnProjection == null) "" else columnProjection.getAllColumns.mkString(",")
+        val executionId = spark.sparkContext.getLocalProperty(SQLExecution.EXECUTION_ID_KEY)
+        if (executionId != null) {
+          Profiler.send(
+            GetPartition(
+              executionId.toLong,
+              tableInfo.getDatabaseName + "." + tableInfo.getFactTable.getTableName,
+              tablePath,
+              queryId,
+              partitions.length,
+              startTime,
+              endTime,
+              getSplitsStartTime,
+              getSplitsEndTime,
+              numSegments,
+              numStreamSegments,
+              numBlocks,
+              distributeStartTime,
+              distributeEndTime,
+              if (filterExpression == null) "" else filterExpression.getStatement,
+              if (columnProjection == null) "" else columnProjection.getAllColumns.mkString(",")
+            )
           )
-        )
+        }
       }
     }
   }
@@ -629,7 +631,7 @@ class CarbonScanRDD(
       recorder.recordStatistics(queryStatistic)
       // print executor query statistics for each task_id
       val statistics = recorder.statisticsForTask(taskId, queryStartTime)
-      if (statistics != null) {
+      if (statistics != null && executionId != null) {
         Profiler.invokeIfEnable {
           val inputSplit = split.asInstanceOf[CarbonSparkPartition].split.value
           inputSplit.calculateLength()