[GitHub] [carbondata] jackylk commented on a change in pull request #3542: [CARBONDATA-3640] Insert from stage command support partition table
jackylk commented on a change in pull request #3542: [CARBONDATA-3640] Insert from stage command support partition table

URL: https://github.com/apache/carbondata/pull/3542#discussion_r361827632

## File path: integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonInsertFromStageCommand.scala

```
@@ -269,11 +281,90 @@ case class CarbonInsertFromStageCommand(
         SparkSQLUtil.sessionState(spark).newHadoopConf()
       ).map { row =>
         (row._1, FailureCauses.NONE == row._2._2.failureCauses)
-      }
+    }
+
+    LOGGER.info(s"finish data loading, time taken ${System.currentTimeMillis() - start}ms")
+  }
+
+  /**
+   * Start global sort loading of partition table
+   */
+  private def startLoadingWithPartition(
+      spark: SparkSession,
+      table: CarbonTable,
+      loadModel: CarbonLoadModel,
+      stageInput: Seq[StageInput]
+  ): Unit = {
+    val partitionDataList = listPartitionFiles(stageInput)
+    val start = System.currentTimeMillis()
+    var index = 0
+    partitionDataList.map(
```

Review comment:
Format it like:
```
partitionDataList.map { case (partitionMap, splits) =>
  ...
}
```
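For illustration, a minimal standalone sketch of the brace-and-destructuring style being requested; `String` stands in for the real `InputSplit` type so the snippet runs on its own:

```
// Stand-in for the PR's Seq[(Map[String, Option[String]], Seq[InputSplit])]:
val partitionDataList: Seq[(Map[String, Option[String]], Seq[String])] =
  Seq((Map("dt" -> Some("2020-01-01")), Seq("split-0", "split-1")))

// Destructuring in the pattern avoids partitionData._1 / partitionData._2 accesses:
partitionDataList.map { case (partitionMap, splits) =>
  s"${splits.size} splits for partition $partitionMap"
}
```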
[GitHub] [carbondata] jackylk commented on a change in pull request #3542: [CARBONDATA-3640] Insert from stage command support partition table
jackylk commented on a change in pull request #3542: [CARBONDATA-3640] Insert from stage command support partition table

URL: https://github.com/apache/carbondata/pull/3542#discussion_r361828090

## File path: integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonLoadDataCommand.scala

```
@@ -898,12 +896,32 @@ case class CarbonLoadDataCommand(
       sortScope: SortScopeOptions.SortScope,
       isDataFrame: Boolean): (LogicalPlan, Int, Option[RDD[InternalRow]]) = {
     // Converts the data as per the loading steps before give it to writer or sorter
-    val updatedRdd = convertData(
+    val convertedRdd = convertData(
       rdd,
       sparkSession,
       loadModel,
       isDataFrame,
       partitionValues)
+    val updatedRdd = if (isDataFrame) {
+      val columnCount = loadModel.getCsvHeaderColumns.length
+      convertedRdd.map{ row =>
```

Review comment:
add space before `{`
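A one-line illustration of the requested style fix; a `Seq` stands in for the real RDD so the snippet runs standalone:

```
val convertedRdd = Seq(Array("a", "b"), Array("c", "d")) // stand-in for the converted RDD
val updatedRdd = convertedRdd.map { row => // space between the call and the closure brace
  row.length
}
```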
[GitHub] [carbondata] jackylk commented on a change in pull request #3542: [CARBONDATA-3640] Insert from stage command support partition table
jackylk commented on a change in pull request #3542: [CARBONDATA-3640] Insert from stage command support partition table

URL: https://github.com/apache/carbondata/pull/3542#discussion_r361828078

## File path: integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonLoadDataCommand.scala

```
@@ -794,8 +792,8 @@ case class CarbonLoadDataCommand(
       sparkSession,
       operationContext)
     val logicalPlan = if (sortScope == SortScopeOptions.SortScope.GLOBAL_SORT) {
-      var numPartitions =
-        CarbonDataProcessorUtil.getGlobalSortPartitions(carbonLoadModel.getGlobalSortPartitions)
+      var numPartitions = 1
+      // CarbonDataProcessorUtil.getGlobalSortPartitions(carbonLoadModel.getGlobalSortPartitions)
```

Review comment:
remove it
[GitHub] [carbondata] jackylk commented on a change in pull request #3542: [CARBONDATA-3640] Insert from stage command support partition table
jackylk commented on a change in pull request #3542: [CARBONDATA-3640] Insert from stage command support partition table

URL: https://github.com/apache/carbondata/pull/3542#discussion_r361828062

## File path: integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonInsertFromStageCommand.scala

```
@@ -269,11 +281,90 @@ case class CarbonInsertFromStageCommand(
         SparkSQLUtil.sessionState(spark).newHadoopConf()
       ).map { row =>
         (row._1, FailureCauses.NONE == row._2._2.failureCauses)
-      }
+    }
+
+    LOGGER.info(s"finish data loading, time taken ${System.currentTimeMillis() - start}ms")
+  }
+
+  /**
+   * Start global sort loading of partition table
+   */
+  private def startLoadingWithPartition(
+      spark: SparkSession,
+      table: CarbonTable,
+      loadModel: CarbonLoadModel,
+      stageInput: Seq[StageInput]
+  ): Unit = {
+    val partitionDataList = listPartitionFiles(stageInput)
+    val start = System.currentTimeMillis()
+    var index = 0
+    partitionDataList.map(
+      partitionData => {
+        index = index + 1
+        val partition = partitionData._1
+        val splits = partitionData._2
+        LOGGER.info(s"start to load ${splits.size} files into " +
+          s"${table.getDatabaseName}.${table.getTableName}")
+        val dataFrame = createInputDataFrameOfInternalRow(spark, table, splits)
+        val columns = dataFrame.columns
+        val header = columns.mkString(",")
+        val selectColumns = columns.filter(!partition.contains(_))
+        val selectedDataFrame = dataFrame.select(selectColumns.head, selectColumns.tail : _*)
+        val loadCommand = CarbonLoadDataCommand(
+          databaseNameOp = Option(table.getDatabaseName),
+          tableName = table.getTableName,
+          factPathFromUser = null,
+          dimFilesPath = Seq(),
+          options = scala.collection.immutable.Map("fileheader" -> header),
+          isOverwriteTable = false,
+          inputSqlString = null,
+          dataFrame = Some(selectedDataFrame),
+          updateModel = None,
+          tableInfoOp = None,
+          internalOptions = Map.empty,
+          partition = partition
+        )
+        loadCommand.run(spark)
+      }
+    )

     LOGGER.info(s"finish data loading, time taken ${System.currentTimeMillis() - start}ms")
   }

+  private def listPartitionFiles(stageInputs : Seq[StageInput]):
+      Seq[(Map[String, Option[String]], Seq[InputSplit])] = {
+    val partitionMap = new util.HashMap[Map[String, Option[String]], util.List[InputSplit]]()
+    stageInputs.foreach(
+      stageInput => {
+        val locations = stageInput.getLocations.asScala
+        locations.foreach(
+          location => {
+            val partition = location.getPartitions.asScala.map(t => (t._1, Option(t._2))).toMap
```

Review comment:
Why is Option required? Can't we remove it?
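A standalone sketch of the two alternatives the question is about, using a plain Java map in place of the real partition data; whether the values can ever be null (which is what `Option(...)` guards against) is an assumption only the PR author can confirm:

```
import scala.collection.JavaConverters._

val rawPartitions = new java.util.HashMap[String, String]()
rawPartitions.put("dt", "2020-01-01")

// With Option, as in the PR (null values become None):
val withOption: Map[String, Option[String]] =
  rawPartitions.asScala.map(t => (t._1, Option(t._2))).toMap

// Without Option, as the reviewer asks (only safe if values are never null):
val withoutOption: Map[String, String] = rawPartitions.asScala.toMap
```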
[GitHub] [carbondata] jackylk commented on a change in pull request #3542: [CARBONDATA-3640] Insert from stage command support partition table
jackylk commented on a change in pull request #3542: [CARBONDATA-3640] Insert from stage command support partition table

URL: https://github.com/apache/carbondata/pull/3542#discussion_r361827985

## File path: integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonInsertFromStageCommand.scala

```
@@ -269,11 +281,90 @@ case class CarbonInsertFromStageCommand(
         SparkSQLUtil.sessionState(spark).newHadoopConf()
       ).map { row =>
         (row._1, FailureCauses.NONE == row._2._2.failureCauses)
-      }
+    }
+
+    LOGGER.info(s"finish data loading, time taken ${System.currentTimeMillis() - start}ms")
+  }
+
+  /**
+   * Start global sort loading of partition table
+   */
+  private def startLoadingWithPartition(
+      spark: SparkSession,
+      table: CarbonTable,
+      loadModel: CarbonLoadModel,
+      stageInput: Seq[StageInput]
+  ): Unit = {
+    val partitionDataList = listPartitionFiles(stageInput)
+    val start = System.currentTimeMillis()
+    var index = 0
+    partitionDataList.map(
+      partitionData => {
+        index = index + 1
+        val partition = partitionData._1
+        val splits = partitionData._2
+        LOGGER.info(s"start to load ${splits.size} files into " +
+          s"${table.getDatabaseName}.${table.getTableName}")
```

Review comment:
Suggest mentioning which partition is being loaded in the log message.
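A standalone sketch of a log message carrying the extra detail; the helper and its parameters are hypothetical, with names mirroring the quoted diff:

```
def loadMessage(
    db: String,
    table: String,
    partition: Map[String, Option[String]],
    fileCount: Int): String = {
  // Same message as the PR, plus the partition spec the reviewer asks for:
  s"start to load $fileCount files into $db.$table, partition: $partition"
}

println(loadMessage("default", "sink", Map("dt" -> Some("2020-01-01")), 3))
```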
[GitHub] [carbondata] jackylk commented on a change in pull request #3542: [CARBONDATA-3640] Insert from stage command support partition table
jackylk commented on a change in pull request #3542: [CARBONDATA-3640] Insert from stage command support partition table

URL: https://github.com/apache/carbondata/pull/3542#discussion_r361827670

## File path: integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonInsertFromStageCommand.scala

```
@@ -269,11 +281,90 @@ case class CarbonInsertFromStageCommand(
         SparkSQLUtil.sessionState(spark).newHadoopConf()
       ).map { row =>
         (row._1, FailureCauses.NONE == row._2._2.failureCauses)
-      }
+    }
+
+    LOGGER.info(s"finish data loading, time taken ${System.currentTimeMillis() - start}ms")
+  }
+
+  /**
+   * Start global sort loading of partition table
+   */
+  private def startLoadingWithPartition(
+      spark: SparkSession,
+      table: CarbonTable,
+      loadModel: CarbonLoadModel,
+      stageInput: Seq[StageInput]
+  ): Unit = {
+    val partitionDataList = listPartitionFiles(stageInput)
+    val start = System.currentTimeMillis()
+    var index = 0
+    partitionDataList.map(
+      partitionData => {
+        index = index + 1
+        val partition = partitionData._1
+        val splits = partitionData._2
+        LOGGER.info(s"start to load ${splits.size} files into " +
+          s"${table.getDatabaseName}.${table.getTableName}")
+        val dataFrame = createInputDataFrameOfInternalRow(spark, table, splits)
+        val columns = dataFrame.columns
+        val header = columns.mkString(",")
+        val selectColumns = columns.filter(!partition.contains(_))
+        val selectedDataFrame = dataFrame.select(selectColumns.head, selectColumns.tail : _*)
+        val loadCommand = CarbonLoadDataCommand(
+          databaseNameOp = Option(table.getDatabaseName),
+          tableName = table.getTableName,
+          factPathFromUser = null,
+          dimFilesPath = Seq(),
+          options = scala.collection.immutable.Map("fileheader" -> header),
+          isOverwriteTable = false,
+          inputSqlString = null,
+          dataFrame = Some(selectedDataFrame),
+          updateModel = None,
+          tableInfoOp = None,
+          internalOptions = Map.empty,
+          partition = partition
+        )
+        loadCommand.run(spark)
+      }
+    )

     LOGGER.info(s"finish data loading, time taken ${System.currentTimeMillis() - start}ms")
   }

+  private def listPartitionFiles(stageInputs : Seq[StageInput]):
```

Review comment:
Describe the return value in a comment; otherwise it is not easy to understand.
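A sketch of the kind of doc comment being asked for; the stub traits and the `PartitionListing` object exist only so the signature compiles standalone, and the wording of the scaladoc is illustrative, not the author's:

```
trait StageInput
trait InputSplit

object PartitionListing {
  /**
   * Groups the input splits of all stage inputs by partition.
   *
   * @return one entry per distinct partition spec: the partition
   *         column-to-value map, paired with every input split
   *         belonging to that partition
   */
  def listPartitionFiles(stageInputs: Seq[StageInput])
    : Seq[(Map[String, Option[String]], Seq[InputSplit])] = ???
}
```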
[GitHub] [carbondata] jackylk commented on a change in pull request #3542: [CARBONDATA-3640] Insert from stage command support partition table
jackylk commented on a change in pull request #3542: [CARBONDATA-3640] Insert from stage command support partition table

URL: https://github.com/apache/carbondata/pull/3542#discussion_r361827656

## File path: integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonInsertFromStageCommand.scala

```
@@ -269,11 +281,90 @@ case class CarbonInsertFromStageCommand(
         SparkSQLUtil.sessionState(spark).newHadoopConf()
       ).map { row =>
         (row._1, FailureCauses.NONE == row._2._2.failureCauses)
-      }
+    }
+
+    LOGGER.info(s"finish data loading, time taken ${System.currentTimeMillis() - start}ms")
+  }
+
+  /**
+   * Start global sort loading of partition table
+   */
+  private def startLoadingWithPartition(
+      spark: SparkSession,
+      table: CarbonTable,
+      loadModel: CarbonLoadModel,
+      stageInput: Seq[StageInput]
+  ): Unit = {
+    val partitionDataList = listPartitionFiles(stageInput)
+    val start = System.currentTimeMillis()
+    var index = 0
+    partitionDataList.map(
+      partitionData => {
+        index = index + 1
+        val partition = partitionData._1
+        val splits = partitionData._2
+        LOGGER.info(s"start to load ${splits.size} files into " +
+          s"${table.getDatabaseName}.${table.getTableName}")
+        val dataFrame = createInputDataFrameOfInternalRow(spark, table, splits)
+        val columns = dataFrame.columns
+        val header = columns.mkString(",")
+        val selectColumns = columns.filter(!partition.contains(_))
+        val selectedDataFrame = dataFrame.select(selectColumns.head, selectColumns.tail : _*)
+        val loadCommand = CarbonLoadDataCommand(
+          databaseNameOp = Option(table.getDatabaseName),
+          tableName = table.getTableName,
+          factPathFromUser = null,
+          dimFilesPath = Seq(),
+          options = scala.collection.immutable.Map("fileheader" -> header),
+          isOverwriteTable = false,
+          inputSqlString = null,
+          dataFrame = Some(selectedDataFrame),
+          updateModel = None,
+          tableInfoOp = None,
+          internalOptions = Map.empty,
+          partition = partition
+        )
+        loadCommand.run(spark)
+      }
+    )

     LOGGER.info(s"finish data loading, time taken ${System.currentTimeMillis() - start}ms")
   }

+  private def listPartitionFiles(stageInputs : Seq[StageInput]):
```

Review comment:
move `:` to next line
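Read literally, the request is the following reshaping of the signature; the stub traits and the `Before`/`After` wrappers exist only so both variants compile side by side:

```
trait StageInput
trait InputSplit

trait Before {
  def listPartitionFiles(stageInputs : Seq[StageInput]):
      Seq[(Map[String, Option[String]], Seq[InputSplit])]
}

trait After {
  def listPartitionFiles(stageInputs: Seq[StageInput])
    : Seq[(Map[String, Option[String]], Seq[InputSplit])]
}
```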
[GitHub] [carbondata] jackylk commented on a change in pull request #3542: [CARBONDATA-3640] Insert from stage command support partition table
jackylk commented on a change in pull request #3542: [CARBONDATA-3640] Insert from stage command support partition table

URL: https://github.com/apache/carbondata/pull/3542#discussion_r361827632

## File path: integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonInsertFromStageCommand.scala

```
@@ -269,11 +281,90 @@ case class CarbonInsertFromStageCommand(
         SparkSQLUtil.sessionState(spark).newHadoopConf()
       ).map { row =>
         (row._1, FailureCauses.NONE == row._2._2.failureCauses)
-      }
+    }
+
+    LOGGER.info(s"finish data loading, time taken ${System.currentTimeMillis() - start}ms")
+  }
+
+  /**
+   * Start global sort loading of partition table
+   */
+  private def startLoadingWithPartition(
+      spark: SparkSession,
+      table: CarbonTable,
+      loadModel: CarbonLoadModel,
+      stageInput: Seq[StageInput]
+  ): Unit = {
+    val partitionDataList = listPartitionFiles(stageInput)
+    val start = System.currentTimeMillis()
+    var index = 0
+    partitionDataList.map(
```

Review comment:
Format it like:
```
partitionDataList.map { partitionData =>
  ...
}
```
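As with the earlier version of this comment, a standalone sketch of this variant of the suggestion, with `String` standing in for the real `InputSplit` type:

```
val partitionDataList: Seq[(Map[String, Option[String]], Seq[String])] =
  Seq((Map("dt" -> Some("2020-01-01")), Seq("split-0", "split-1")))

partitionDataList.map { partitionData =>
  val partition = partitionData._1 // partition column -> value map
  val splits = partitionData._2    // files belonging to that partition
  s"${splits.size} splits for partition $partition"
}
```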
[GitHub] [carbondata] jackylk commented on a change in pull request #3542: [CARBONDATA-3640] Insert from stage command support partition table
jackylk commented on a change in pull request #3542: [CARBONDATA-3640] Insert from stage command support partition table

URL: https://github.com/apache/carbondata/pull/3542#discussion_r361827544

## File path: integration/flink/src/test/scala/org/apache/carbon/flink/TestCarbonPartitionWriter.scala

```
@@ -189,4 +194,20 @@ class TestCarbonPartitionWriter extends QueryTest {
     output.asScala
   }

+  private def delDir(dir: File): Boolean = {
+    if (dir.isDirectory) {
+      val children = dir.list
+      if (children != null) {
+        var i = 0
+        while (i < children.length) {
```

Review comment:
declare a val like `val len = children.length`
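A sketch of `delDir` with the suggestion applied. Everything past the quoted lines (the recursive delete and the final `dir.delete()`) is an assumption, since the hunk is truncated; the `DirCleaner` wrapper exists only to make the sketch compile standalone:

```
import java.io.File

object DirCleaner {
  def delDir(dir: File): Boolean = {
    if (dir.isDirectory) {
      val children = dir.list
      if (children != null) {
        val len = children.length // hoisted once, as the review suggests
        var i = 0
        while (i < len) {
          // assumed recursion; the original body is cut off in the quoted hunk
          if (!delDir(new File(dir, children(i)))) {
            return false
          }
          i += 1
        }
      }
    }
    dir.delete()
  }
}
```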