Github user manishgupta88 commented on a diff in the pull request:

    https://github.com/apache/carbondata/pull/3027#discussion_r244271732
  
    --- Diff: 
integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/schema/CarbonAlterTableAddColumnCommand.scala
 ---
    @@ -93,11 +93,17 @@ private[sql] case class 
CarbonAlterTableAddColumnCommand(
           schemaEvolutionEntry.setAdded(newCols.toList.asJava)
           val thriftTable = schemaConverter
             .fromWrapperToExternalTableInfo(wrapperTableInfo, dbName, 
tableName)
    +      // carbon columns based on schema order
    +      val carbonColumns = 
carbonTable.getCreateOrderColumn(carbonTable.getTableName).asScala
    +        .collect { case carbonColumn => carbonColumn.getColumnSchema }
    +        .filter(!_.isInvisible)
    +      // sort the new columns based on schema order
    +      val sortedColsBasedActualSchemaOrder = newCols.sortBy(a => 
a.getSchemaOrdinal)
           val (tableIdentifier, schemaParts, cols) = 
AlterTableUtil.updateSchemaInfo(
               carbonTable,
               
schemaConverter.fromWrapperToExternalSchemaEvolutionEntry(schemaEvolutionEntry),
               thriftTable,
    -          Some(newCols))(sparkSession)
    +          Some(carbonColumns ++ 
sortedColsBasedActualSchemaOrder))(sparkSession)
    --- End diff --
    
    `AlterTableUtil.updateSchemaInfo` does not make use of the columns passed to 
it, so remove that method argument and instead use these columns when updating the Hive schema.


---

Reply via email to