GitHub user cloud-fan commented on a diff in the pull request:
https://github.com/apache/spark/pull/18747#discussion_r146412033
--- Diff: sql/core/src/main/scala/org/apache/spark/sql/execution/columnar/InMemoryTableScanExec.scala ---
@@ -23,21 +23,66 @@ import org.apache.spark.sql.catalyst.dsl.expressions._
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.plans.QueryPlan
import org.apache.spark.sql.catalyst.plans.physical.{HashPartitioning, Partitioning}
-import org.apache.spark.sql.execution.LeafExecNode
-import org.apache.spark.sql.execution.metric.SQLMetrics
-import org.apache.spark.sql.types.UserDefinedType
+import org.apache.spark.sql.execution.{ColumnarBatchScan, LeafExecNode, WholeStageCodegenExec}
+import org.apache.spark.sql.execution.vectorized._
+import org.apache.spark.sql.types._

case class InMemoryTableScanExec(
    attributes: Seq[Attribute],
    predicates: Seq[Expression],
    @transient relation: InMemoryRelation)
-  extends LeafExecNode {
+  extends LeafExecNode with ColumnarBatchScan {

  override protected def innerChildren: Seq[QueryPlan[_]] = Seq(relation) ++ super.innerChildren
-  override lazy val metrics = Map(
-    "numOutputRows" -> SQLMetrics.createMetric(sparkContext, "number of output rows"))
+  override def vectorTypes: Option[Seq[String]] =
+    Option(Seq.fill(attributes.length)(classOf[OnHeapColumnVector].getName))
+
+  /**
+   * If true, get data directly from the ColumnVectors in the ColumnarBatch, which is generally faster.
+   * If false, get data from UnsafeRows built from the ColumnVectors.
+   */
+  override val supportCodegen: Boolean = {
+    // In the initial implementation, for ease of review, support only primitive
+    // data types and fewer fields than wholeStageMaxNumFields.
+    relation.schema.fields.find(f => f.dataType match {
--- End diff --
rewrite `find(...)` -> `forall`?
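
For illustration, a minimal sketch of the two shapes side by side. The `match` body is truncated in the diff above, so `isSupportedType` and the sample schema below are hypothetical stand-ins, not the PR's actual check:

```scala
import org.apache.spark.sql.types._

// Hypothetical stand-in for the elided type check in the diff's `match` body.
def isSupportedType(dt: DataType): Boolean = dt match {
  case BooleanType | ByteType | ShortType | IntegerType |
       LongType | FloatType | DoubleType => true
  case _ => false
}

// Sample schema, purely for illustration.
val fields: Seq[StructField] =
  StructType(Seq(StructField("i", IntegerType), StructField("s", StringType))).fields

// find-based: look for an unsupported field, then test that none was found.
val supported1 = fields.find(f => !isSupportedType(f.dataType)).isEmpty

// forall-based: every field's type must be supported; states the intent directly.
val supported2 = fields.forall(f => isSupportedType(f.dataType))

assert(supported1 == supported2)  // both false here, because of the StringType field
```

Both forms short-circuit at the first unsupported field, so the rewrite is purely about readability, not behavior.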
---