VenuReddy2103 commented on a change in pull request #3776:
URL: https://github.com/apache/carbondata/pull/3776#discussion_r436633946



##########
File path: hadoop/src/main/java/org/apache/carbondata/hadoop/api/CarbonOutputCommitter.java
##########
@@ -302,6 +317,54 @@ private void commitJobForPartition(JobContext context, boolean overwriteSet,
     commitJobFinal(context, loadModel, operationContext, carbonTable, uniqueId);
   }
 
+  @SuppressWarnings("unchecked")
+  private void writeSegmentWithoutMergeIndex(JobContext context, CarbonLoadModel loadModel,
+      String segmentFileName, String partitionPath) throws IOException {
+    Map<String, String> IndexFileNameMap = (Map<String, String>) ObjectSerializationUtil

Review comment:
       Modified
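
A sketch of the renamed declaration this likely refers to (an assumption, based on the lower-camel-case `indexFileNameMap` quoted in a later comment):

```java
// Hypothetical renamed declaration; behavior is unchanged, only the name
// now follows Java lower-camel-case convention.
Map<String, String> indexFileNameMap = (Map<String, String>) ObjectSerializationUtil
    .convertStringToObject(context.getConfiguration().get("carbon.index.files.name"));
```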

##########
File path: hadoop/src/main/java/org/apache/carbondata/hadoop/api/CarbonOutputCommitter.java
##########
@@ -302,6 +317,54 @@ private void commitJobForPartition(JobContext context, boolean overwriteSet,
     commitJobFinal(context, loadModel, operationContext, carbonTable, uniqueId);
   }
 
+  @SuppressWarnings("unchecked")
+  private void writeSegmentWithoutMergeIndex(JobContext context, CarbonLoadModel loadModel,
+      String segmentFileName, String partitionPath) throws IOException {
+    Map<String, String> IndexFileNameMap = (Map<String, String>) ObjectSerializationUtil
+        .convertStringToObject(context.getConfiguration().get("carbon.index.files.name"));
+    List<String> partitionList =
+        (List<String>) ObjectSerializationUtil.convertStringToObject(partitionPath);
+    SegmentFileStore.SegmentFile finalSegmentFile = null;
+    boolean isRelativePath;
+    String path;
+    for (String partition : partitionList) {
+      isRelativePath = false;
+      path = partition;
+      if (path.startsWith(loadModel.getTablePath())) {
+        path = path.substring(loadModel.getTablePath().length());
+        isRelativePath = true;
+      }
+      SegmentFileStore.SegmentFile segmentFile = new SegmentFileStore.SegmentFile();
+      SegmentFileStore.FolderDetails folderDetails = new SegmentFileStore.FolderDetails();
+      Set<String> set = new HashSet<String>();

Review comment:
       Removed the variable itself now; the files are set directly in the folder details:
   
`folderDetails.setFiles(Collections.singleton(indexFileNameMap.get(partition)));`
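
For context, a minimal sketch of the loop body after this change (assuming the map variable is the renamed `indexFileNameMap`):

```java
// One FolderDetails per partition; the partition's single index file is
// wrapped in an immutable singleton set instead of a mutable HashSet.
SegmentFileStore.FolderDetails folderDetails = new SegmentFileStore.FolderDetails();
folderDetails.setFiles(Collections.singleton(indexFileNameMap.get(partition)));
```

`Collections.singleton` also makes the single-file intent explicit and avoids allocating a new HashSet per partition.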

##########
File path: hadoop/src/main/java/org/apache/carbondata/hadoop/api/CarbonOutputCommitter.java
##########
@@ -302,6 +317,54 @@ private void commitJobForPartition(JobContext context, boolean overwriteSet,
     commitJobFinal(context, loadModel, operationContext, carbonTable, uniqueId);
   }
 
+  @SuppressWarnings("unchecked")
+  private void writeSegmentWithoutMergeIndex(JobContext context, CarbonLoadModel loadModel,
+      String segmentFileName, String partitionPath) throws IOException {
+    Map<String, String> IndexFileNameMap = (Map<String, String>) ObjectSerializationUtil
+        .convertStringToObject(context.getConfiguration().get("carbon.index.files.name"));
+    List<String> partitionList =
+        (List<String>) ObjectSerializationUtil.convertStringToObject(partitionPath);
+    SegmentFileStore.SegmentFile finalSegmentFile = null;
+    boolean isRelativePath;
+    String path;
+    for (String partition : partitionList) {
+      isRelativePath = false;
+      path = partition;
+      if (path.startsWith(loadModel.getTablePath())) {
+        path = path.substring(loadModel.getTablePath().length());
+        isRelativePath = true;
+      }
+      SegmentFileStore.SegmentFile segmentFile = new SegmentFileStore.SegmentFile();
+      SegmentFileStore.FolderDetails folderDetails = new SegmentFileStore.FolderDetails();
+      Set<String> set = new HashSet<String>();

Review comment:
       Modified

##########
File path: hadoop/src/main/java/org/apache/carbondata/hadoop/api/CarbonOutputCommitter.java
##########
@@ -302,6 +317,54 @@ private void commitJobForPartition(JobContext context, boolean overwriteSet,
     commitJobFinal(context, loadModel, operationContext, carbonTable, uniqueId);
   }
 
+  @SuppressWarnings("unchecked")
+  private void writeSegmentWithoutMergeIndex(JobContext context, CarbonLoadModel loadModel,
+      String segmentFileName, String partitionPath) throws IOException {
+    Map<String, String> IndexFileNameMap = (Map<String, String>) ObjectSerializationUtil
+        .convertStringToObject(context.getConfiguration().get("carbon.index.files.name"));
+    List<String> partitionList =
+        (List<String>) ObjectSerializationUtil.convertStringToObject(partitionPath);
+    SegmentFileStore.SegmentFile finalSegmentFile = null;
+    boolean isRelativePath;
+    String path;
+    for (String partition : partitionList) {
+      isRelativePath = false;
+      path = partition;
+      if (path.startsWith(loadModel.getTablePath())) {
+        path = path.substring(loadModel.getTablePath().length());
+        isRelativePath = true;
+      }
+      SegmentFileStore.SegmentFile segmentFile = new SegmentFileStore.SegmentFile();
+      SegmentFileStore.FolderDetails folderDetails = new SegmentFileStore.FolderDetails();
+      Set<String> set = new HashSet<String>();
+      set.add(IndexFileNameMap.get(partition));
+      folderDetails.setFiles(set);
+      List<String> partitions = new ArrayList<String>();

Review comment:
       Modified

##########
File path: hadoop/src/main/java/org/apache/carbondata/hadoop/api/CarbonOutputCommitter.java
##########
@@ -302,6 +317,54 @@ private void commitJobForPartition(JobContext context, boolean overwriteSet,
     commitJobFinal(context, loadModel, operationContext, carbonTable, uniqueId);
   }
 
+  @SuppressWarnings("unchecked")
+  private void writeSegmentWithoutMergeIndex(JobContext context, CarbonLoadModel loadModel,
+      String segmentFileName, String partitionPath) throws IOException {
+    Map<String, String> IndexFileNameMap = (Map<String, String>) ObjectSerializationUtil
+        .convertStringToObject(context.getConfiguration().get("carbon.index.files.name"));
+    List<String> partitionList =
+        (List<String>) ObjectSerializationUtil.convertStringToObject(partitionPath);
+    SegmentFileStore.SegmentFile finalSegmentFile = null;
+    boolean isRelativePath;
+    String path;

Review comment:
       Modified

##########
File path: hadoop/src/main/java/org/apache/carbondata/hadoop/api/CarbonOutputCommitter.java
##########
@@ -302,6 +317,54 @@ private void commitJobForPartition(JobContext context, boolean overwriteSet,
     commitJobFinal(context, loadModel, operationContext, carbonTable, uniqueId);
   }
 
+  @SuppressWarnings("unchecked")
+  private void writeSegmentWithoutMergeIndex(JobContext context, CarbonLoadModel loadModel,

Review comment:
       Added a header comment for this private method.
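
For illustration, such a header might read roughly as follows (a sketch only; the PR's actual wording is not quoted in this thread):

```java
/**
 * Writes a segment file that references each partition's index file directly,
 * used when merge index files are disabled for the segment.
 * (The description above is an assumption, not the PR's actual comment.)
 */
```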

##########
File path: core/src/main/java/org/apache/carbondata/core/preagg/TimeSeriesUDF.java
##########
@@ -176,9 +176,9 @@ private void initialize() {
     } catch (IllegalArgumentException ex) {
      LOGGER.warn("Invalid value set for first of the week. Considering the default value as: "
           + CarbonCommonConstants.CARBON_TIMESERIES_FIRST_DAY_OF_WEEK_DEFAULT);
-      firstDayOfWeek = DaysOfWeekEnum.valueOf(CarbonProperties.getInstance()
-          .getProperty(CarbonCommonConstants.CARBON_TIMESERIES_FIRST_DAY_OF_WEEK_DEFAULT)
-          .toUpperCase()).getOrdinal();
+      firstDayOfWeek =

Review comment:
       Added this info to the PR description.

##########
File path: integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/standardpartition/StandardPartitionTableLoadingTestCase.scala
##########
@@ -566,6 +567,18 @@ class StandardPartitionTableLoadingTestCase extends QueryTest with BeforeAndAfte
     assert(ex.getMessage().equalsIgnoreCase("Cannot use all columns for partition columns;"))
   }
 
+  test("test partition without merge index files for segment") {
+    sql("DROP TABLE IF EXISTS new_par")
+    CarbonProperties.getInstance()
+      .addProperty(CarbonCommonConstants.CARBON_MERGE_INDEX_IN_SEGMENT, "false")
+    sql("CREATE TABLE new_par (a INT, b INT) PARTITIONED BY (country STRING) STORED AS carbondata")
+    sql("INSERT INTO new_par PARTITION(country='India') SELECT 1,2")
+    sql("INSERT INTO new_par PARTITION(country='India') SELECT 3,4")
+    sql("INSERT INTO new_par PARTITION(country='China') SELECT 5,6")
+    sql("INSERT INTO new_par PARTITION(country='China') SELECT 7,8")
+    checkAnswer(sql("SELECT COUNT(*) FROM new_par"), Seq(Row(4)))

Review comment:
       I think it is redundant to do that: if the segment files or index files are not present, this query check will fail.
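
For reference, if an explicit assertion were still wanted, it might look roughly like this (a sketch; the `CarbonEnv`/`FileFactory` usage and available imports are assumptions):

```scala
// Verify the inserts left .carbonindex files (i.e. no merge index was written).
// Assumes the suite already imports CarbonEnv, FileFactory, CarbonTablePath
// and scala.collection.JavaConverters._.
val tablePath = CarbonEnv.getCarbonTable(None, "new_par")(sqlContext.sparkSession).getTablePath
val indexFiles = FileFactory.getCarbonFile(tablePath).listFiles(true).asScala
  .filter(_.getName.endsWith(CarbonTablePath.INDEX_FILE_EXT))
assert(indexFiles.nonEmpty)
```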

##########
File path: hadoop/src/main/java/org/apache/carbondata/hadoop/api/CarbonOutputCommitter.java
##########
@@ -266,7 +270,16 @@ private void commitJobForPartition(JobContext context, boolean overwriteSet,
         && operationContext != null) {
       uuid = operationContext.getProperty("uuid").toString();
     }
+    String segmentFileName = SegmentFileStore.genSegmentFileName(
+        loadModel.getSegmentId(), String.valueOf(loadModel.getFactTimeStamp()));
+    boolean isMergeIndex = Boolean.parseBoolean(CarbonProperties.getInstance()

Review comment:
       This issue is applicable only when the table is partitioned, so I think it is appropriate to handle it inside `commitJobForPartition()`.
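
For context, a sketch of the gating this implies inside `commitJobForPartition()` (hedged; it mirrors the diff above, but the surrounding code and the default-value constant are assumptions):

```java
// Read the merge-index property and take the per-partition path only when
// merge index is disabled; non-partitioned tables never reach this code.
// segmentFileName and partitionPath are assumed prepared earlier in the method.
boolean isMergeIndex = Boolean.parseBoolean(CarbonProperties.getInstance()
    .getProperty(CarbonCommonConstants.CARBON_MERGE_INDEX_IN_SEGMENT,
        CarbonCommonConstants.CARBON_MERGE_INDEX_IN_SEGMENT_DEFAULT));
if (!isMergeIndex) {
  writeSegmentWithoutMergeIndex(context, loadModel, segmentFileName, partitionPath);
}
```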




----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org

