Repository: spark
Updated Branches:
  refs/heads/master 9a54de16e -> 9016af3f2


[SPARK-2888] [SQL] Fix addColumnMetadataToConf in HiveTableScan

JIRA: https://issues.apache.org/jira/browse/SPARK-2888

Author: Yin Huai <[email protected]>

Closes #1817 from yhuai/fixAddColumnMetadataToConf and squashes the following commits:

fba728c [Yin Huai] Fix addColumnMetadataToConf.


Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/9016af3f
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/9016af3f
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/9016af3f

Branch: refs/heads/master
Commit: 9016af3f2729101027e33593e094332f05f48d92
Parents: 9a54de1
Author: Yin Huai <[email protected]>
Authored: Fri Aug 8 11:01:51 2014 -0700
Committer: Michael Armbrust <[email protected]>
Committed: Fri Aug 8 11:01:51 2014 -0700

----------------------------------------------------------------------
 .../spark/sql/hive/execution/HiveTableScan.scala   | 17 ++++++-----------
 1 file changed, 6 insertions(+), 11 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/spark/blob/9016af3f/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/HiveTableScan.scala
----------------------------------------------------------------------
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/HiveTableScan.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/HiveTableScan.scala
index 8920e2a..577ca92 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/HiveTableScan.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/HiveTableScan.scala
@@ -72,17 +72,12 @@ case class HiveTableScan(
   }
 
   private def addColumnMetadataToConf(hiveConf: HiveConf) {
-    // Specifies IDs and internal names of columns to be scanned.
-    val neededColumnIDs = attributes.map(a => relation.output.indexWhere(_.name == a.name): Integer)
-    val columnInternalNames = neededColumnIDs.map(HiveConf.getColumnInternalName(_)).mkString(",")
-
-    if (attributes.size == relation.output.size) {
-      // SQLContext#pruneFilterProject guarantees no duplicated value in `attributes`
-      ColumnProjectionUtils.setFullyReadColumns(hiveConf)
-    } else {
-      ColumnProjectionUtils.appendReadColumnIDs(hiveConf, neededColumnIDs)
-    }
+    // Specifies needed column IDs for those non-partitioning columns.
+    val neededColumnIDs =
+      attributes.map(a =>
+        relation.attributes.indexWhere(_.name == a.name): Integer).filter(index => index >= 0)
 
+    ColumnProjectionUtils.appendReadColumnIDs(hiveConf, neededColumnIDs)
     ColumnProjectionUtils.appendReadColumnNames(hiveConf, attributes.map(_.name))
 
     // Specifies types and object inspectors of columns to be scanned.
@@ -99,7 +94,7 @@ case class HiveTableScan(
       .mkString(",")
 
     hiveConf.set(serdeConstants.LIST_COLUMN_TYPES, columnTypeNames)
-    hiveConf.set(serdeConstants.LIST_COLUMNS, columnInternalNames)
+    hiveConf.set(serdeConstants.LIST_COLUMNS, relation.attributes.map(_.name).mkString(","))
   }
 
   addColumnMetadataToConf(context.hiveconf)


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to