vikramahuja1001 commented on a change in pull request #4141:
URL: https://github.com/apache/carbondata/pull/4141#discussion_r644562441
##########
File path: integration/spark/src/main/spark2.4/org/apache/spark/sql/CarbonToSparkAdapter.scala
##########
@@ -229,8 +270,388 @@ object CarbonToSparkAdapter {
      .unwrapped
      .asInstanceOf[HiveExternalCatalog]
  }
+
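+  // The methods below form the Spark-2.4 side of CarbonData's version
+  // adapter: common code calls CarbonToSparkAdapter so that APIs which
+  // changed between Spark releases stay isolated here. A hypothetical
+  // call site (not part of this change) would look like:
+  //   CarbonToSparkAdapter.stringToTimestamp("2021-06-01 00:00:00")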
+  def createFilePartition(index: Int, files: ArrayBuffer[PartitionedFile]): FilePartition = {
+    FilePartition(index, files.toArray)
+  }
+
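+  // In Spark 2.4, DateTimeUtils.stringToTimestamp returns the parsed value
+  // as microseconds since the epoch; Spark 3.x adds a ZoneId parameter.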
+  def stringToTimestamp(timestamp: String): Option[Long] = {
+    DateTimeUtils.stringToTimestamp(UTF8String.fromString(timestamp))
+  }
+
+  def stringToTime(value: String): java.util.Date = {
+    DateTimeUtils.stringToTime(value)
+  }
+
+  def timeStampToString(timeStamp: Long): String = {
+    DateTimeUtils.timestampToString(timeStamp)
+  }
+
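+  // UnresolvedRelation exposes a single-part tableIdentifier in Spark 2.4;
+  // Spark 3.x switches to a multipartIdentifier.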
+  def getTableIdentifier(u: UnresolvedRelation): Some[TableIdentifier] = {
+    Some(u.tableIdentifier)
+  }
+
+  def dateToString(date: Int): String = {
+    DateTimeUtils.dateToString(date)
+  }
+
+  def getProcessingTime: String => Trigger = {
+    Trigger.ProcessingTime
+  }
+
+  def addTaskCompletionListener[U](f: => U): Unit = {
+    TaskContext.get().addTaskCompletionListener { context =>
+      f
+    }
+  }
+
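+  // prepareShuffleDependency with this four-argument signature is specific
+  // to Spark 2.4; Spark 3.x adds a shuffle-write metrics argument.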
+  def createShuffledRowRDD(sparkContext: SparkContext, localTopK: RDD[InternalRow],
+      child: SparkPlan, serializer: Serializer): ShuffledRowRDD = {
+    new ShuffledRowRDD(
+      ShuffleExchangeExec.prepareShuffleDependency(
+        localTopK, child.output, SinglePartition, serializer))
+  }
+
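+  // InsertIntoTable is the Spark 2.4 logical plan node; Spark 3.x replaces
+  // it with InsertIntoStatement, hence this wrapper and the alias below.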
+  def getInsertIntoCommand(table: LogicalPlan,
+      partition: Map[String, Option[String]],
+      query: LogicalPlan,
+      overwrite: Boolean,
+      ifPartitionNotExists: Boolean): InsertIntoTable = {
+    InsertIntoTable(
+      table,
+      partition,
+      query,
+      overwrite,
+      ifPartitionNotExists)
+  }
+
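+  // Spark 2.4's ExplainCommand takes boolean flags; Spark 3.x replaces them
+  // with an ExplainMode, so the mode string is mapped to `extended` here.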
+  def getExplainCommandObj(logicalPlan: LogicalPlan = OneRowRelation(),
+      mode: Option[String]): ExplainCommand = {
+    ExplainCommand(logicalPlan, mode.isDefined)
+  }
+
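+  // Analyzer.executeAndCheck takes only the plan in Spark 2.4; Spark 3.x
+  // adds a QueryPlanningTracker parameter.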
+  def invokeAnalyzerExecute(analyzer: Analyzer,
+      plan: LogicalPlan): LogicalPlan = {
+    analyzer.executeAndCheck(plan)
+  }
+
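+  // QueryPlan.normalizeExprId is the 2.4 entry point for canonicalising
+  // expression IDs; later Spark versions rework this into
+  // QueryPlan.normalizeExpressions.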
+  def normalizeExpressions(r: NamedExpression, attrs: AttributeSeq): NamedExpression = {
+    QueryPlan.normalizeExprId(r, attrs)
+  }
+
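+  // BuildSide and its BuildLeft/BuildRight cases moved packages between
+  // Spark 2.4 and 3.1, so common code goes through these getters and the
+  // CarbonBuildSideType alias instead of importing them directly.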
+  def getBuildRight: BuildSide = {
+    BuildRight
+  }
+
+  def getBuildLeft: BuildSide = {
+    BuildLeft
+  }
+
+  type CarbonBuildSideType = BuildSide
+  type InsertIntoStatementWrapper = InsertIntoTable
+
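+  // SQLExecution.withNewExecutionId is curried over (session, queryExecution)
+  // in Spark 2.4; the signature changes in Spark 3.x.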
+  def withNewExecutionId[T](sparkSession: SparkSession, queryExecution: QueryExecution): T => T = {
+    SQLExecution.withNewExecutionId(sparkSession, queryExecution)(_)
+  }
+
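+  // Spark 3.x adds a JoinHint parameter to Join; in 2.4 the four-argument
+  // constructor is used directly.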
+  def createJoinNode(child: LogicalPlan,
+      targetTable: LogicalPlan,
+      joinType: JoinType,
+      condition: Option[Expression]): Join = {
+    Join(child, targetTable, joinType, condition)
+  }
+
+  def getPartitionsFromInsert(x: InsertIntoStatementWrapper): Map[String, Option[String]] = {
+    x.partition
+  }
+
+  def getTableIdentifier(parts: TableIdentifier): TableIdentifier = {
+    parts
+  }
+
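+  // Spark 2.4's Statistics still carries a hints field (dropped in 3.0),
+  // so the object is rebuilt here rather than in common code.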
+  def getStatisticsObj(outputList: Seq[NamedExpression],
+      plan: LogicalPlan, stats: Statistics,
+      aliasMap: Option[AttributeMap[Attribute]] = None): Statistics = {
+    val output = outputList.map(_.toAttribute)
+    val mapSeq = plan.collect { case n: logical.LeafNode => n }.map {
+      table => AttributeMap(table.output.zip(output))
+    }
+    val rewrites = mapSeq.head
+    val attributes: AttributeMap[ColumnStat] = stats.attributeStats
+    var attributeStats = AttributeMap(attributes.iterator
+      .map { pair => (rewrites(pair._1), pair._2) }.toSeq)
+    if (aliasMap.isDefined) {
+      attributeStats = AttributeMap(
+        attributeStats.map(pair => (aliasMap.get(pair._1), pair._2)).toSeq)
+    }
+    Statistics(stats.sizeInBytes, stats.rowCount, attributeStats, stats.hints)
+  }
+
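+  // RefreshTable is constructed from a TableIdentifier in Spark 2.4; the
+  // command's package and shape change in later Spark versions.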
+  def createRefreshTableCommand(tableIdentifier: TableIdentifier): RefreshTable = {
+    RefreshTable(tableIdentifier)
+  }
+
+  type RefreshTables = RefreshTable
Review comment:
done
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
For queries about this service, please contact Infrastructure at:
[email protected]