Repository: spark
Updated Branches:
  refs/heads/master 2badb58cd -> 97594c29b


http://git-wip-us.apache.org/repos/asf/spark/blob/97594c29/sql/hive/src/main/scala/org/apache/spark/sql/hive/TableReader.scala
----------------------------------------------------------------------
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/TableReader.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/TableReader.scala
index ec7e53e..2a54163 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/TableReader.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/TableReader.scala
@@ -120,7 +120,7 @@ class HadoopTableReader(
     val hadoopRDD = createHadoopRdd(tableDesc, inputPathStr, ifc)
 
     val attrsWithIndex = attributes.zipWithIndex
-    val mutableRow = new SpecificMutableRow(attributes.map(_.dataType))
+    val mutableRow = new SpecificInternalRow(attributes.map(_.dataType))
 
     val deserializedHadoopRDD = hadoopRDD.mapPartitions { iter =>
       val hconf = broadcastedHadoopConf.value.value
@@ -215,7 +215,7 @@ class HadoopTableReader(
       val tableDesc = relation.tableDesc
       val broadcastedHiveConf = _broadcastedHadoopConf
       val localDeserializer = partDeserializer
-      val mutableRow = new SpecificMutableRow(attributes.map(_.dataType))
+      val mutableRow = new SpecificInternalRow(attributes.map(_.dataType))
 
       // Splits all attributes into two groups, partition key attributes and those that are not.
       // Attached indices indicate the position of each attribute in the output schema.
@@ -224,7 +224,7 @@ class HadoopTableReader(
           relation.partitionKeys.contains(attr)
         }
 
-      def fillPartitionKeys(rawPartValues: Array[String], row: MutableRow): Unit = {
+      def fillPartitionKeys(rawPartValues: Array[String], row: InternalRow): Unit = {
         partitionKeyAttrs.foreach { case (attr, ordinal) =>
           val partOrdinal = relation.partitionKeys.indexOf(attr)
           row(ordinal) = Cast(Literal(rawPartValues(partOrdinal)), attr.dataType).eval(null)
@@ -360,7 +360,7 @@ private[hive] object HadoopTableReader extends HiveInspectors with Logging {
       iterator: Iterator[Writable],
       rawDeser: Deserializer,
       nonPartitionKeyAttrs: Seq[(Attribute, Int)],
-      mutableRow: MutableRow,
+      mutableRow: InternalRow,
       tableDeser: Deserializer): Iterator[InternalRow] = {
 
     val soi = if (rawDeser.getObjectInspector.equals(tableDeser.getObjectInspector)) {
@@ -381,43 +381,43 @@ private[hive] object HadoopTableReader extends HiveInspectors with Logging {
      * Builds specific unwrappers ahead of time according to object inspector
      * types to avoid pattern matching and branching costs per row.
      */
-    val unwrappers: Seq[(Any, MutableRow, Int) => Unit] = fieldRefs.map {
+    val unwrappers: Seq[(Any, InternalRow, Int) => Unit] = fieldRefs.map {
       _.getFieldObjectInspector match {
         case oi: BooleanObjectInspector =>
-          (value: Any, row: MutableRow, ordinal: Int) => row.setBoolean(ordinal, oi.get(value))
+          (value: Any, row: InternalRow, ordinal: Int) => row.setBoolean(ordinal, oi.get(value))
         case oi: ByteObjectInspector =>
-          (value: Any, row: MutableRow, ordinal: Int) => row.setByte(ordinal, oi.get(value))
+          (value: Any, row: InternalRow, ordinal: Int) => row.setByte(ordinal, oi.get(value))
         case oi: ShortObjectInspector =>
-          (value: Any, row: MutableRow, ordinal: Int) => row.setShort(ordinal, oi.get(value))
+          (value: Any, row: InternalRow, ordinal: Int) => row.setShort(ordinal, oi.get(value))
         case oi: IntObjectInspector =>
-          (value: Any, row: MutableRow, ordinal: Int) => row.setInt(ordinal, oi.get(value))
+          (value: Any, row: InternalRow, ordinal: Int) => row.setInt(ordinal, oi.get(value))
         case oi: LongObjectInspector =>
-          (value: Any, row: MutableRow, ordinal: Int) => row.setLong(ordinal, oi.get(value))
+          (value: Any, row: InternalRow, ordinal: Int) => row.setLong(ordinal, oi.get(value))
         case oi: FloatObjectInspector =>
-          (value: Any, row: MutableRow, ordinal: Int) => row.setFloat(ordinal, oi.get(value))
+          (value: Any, row: InternalRow, ordinal: Int) => row.setFloat(ordinal, oi.get(value))
         case oi: DoubleObjectInspector =>
-          (value: Any, row: MutableRow, ordinal: Int) => row.setDouble(ordinal, oi.get(value))
+          (value: Any, row: InternalRow, ordinal: Int) => row.setDouble(ordinal, oi.get(value))
         case oi: HiveVarcharObjectInspector =>
-          (value: Any, row: MutableRow, ordinal: Int) =>
+          (value: Any, row: InternalRow, ordinal: Int) =>
             row.update(ordinal, UTF8String.fromString(oi.getPrimitiveJavaObject(value).getValue))
         case oi: HiveCharObjectInspector =>
-          (value: Any, row: MutableRow, ordinal: Int) =>
+          (value: Any, row: InternalRow, ordinal: Int) =>
             row.update(ordinal, UTF8String.fromString(oi.getPrimitiveJavaObject(value).getValue))
         case oi: HiveDecimalObjectInspector =>
-          (value: Any, row: MutableRow, ordinal: Int) =>
+          (value: Any, row: InternalRow, ordinal: Int) =>
             row.update(ordinal, HiveShim.toCatalystDecimal(oi, value))
         case oi: TimestampObjectInspector =>
-          (value: Any, row: MutableRow, ordinal: Int) =>
+          (value: Any, row: InternalRow, ordinal: Int) =>
             row.setLong(ordinal, DateTimeUtils.fromJavaTimestamp(oi.getPrimitiveJavaObject(value)))
         case oi: DateObjectInspector =>
-          (value: Any, row: MutableRow, ordinal: Int) =>
+          (value: Any, row: InternalRow, ordinal: Int) =>
             row.setInt(ordinal, DateTimeUtils.fromJavaDate(oi.getPrimitiveJavaObject(value)))
         case oi: BinaryObjectInspector =>
-          (value: Any, row: MutableRow, ordinal: Int) =>
+          (value: Any, row: InternalRow, ordinal: Int) =>
             row.update(ordinal, oi.getPrimitiveJavaObject(value))
         case oi =>
           val unwrapper = unwrapperFor(oi)
-          (value: Any, row: MutableRow, ordinal: Int) => row(ordinal) = unwrapper(value)
+          (value: Any, row: InternalRow, ordinal: Int) => row(ordinal) = unwrapper(value)
       }
     }
 

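The hunks above keep an existing optimization: the ObjectInspector pattern match runs once per partition to build a table of unwrapper closures, and each row only invokes the precomputed closures. A minimal, self-contained Scala sketch of that pattern follows; the FieldType ADT and buildSetters name are invented for illustration and are not Spark or Hive APIs.

// Precompute one setter closure per field, then reuse the closures for every row,
// so per-field type dispatch happens once per partition instead of once per value.
object UnwrapperSketch {
  sealed trait FieldType
  case object IntField extends FieldType
  case object StringField extends FieldType

  def buildSetters(types: Seq[FieldType]): Seq[(Any, Array[Any], Int) => Unit] =
    types.map {
      case IntField    => (v: Any, row: Array[Any], i: Int) => row(i) = v.asInstanceOf[Int]
      case StringField => (v: Any, row: Array[Any], i: Int) => row(i) = v.toString
    }

  def main(args: Array[String]): Unit = {
    val setters = buildSetters(Seq(IntField, StringField))
    val row = new Array[Any](2)            // stands in for the reused mutable row
    setters(0)(42, row, 0)
    setters(1)("hive", row, 1)
    println(row.mkString(", "))            // 42, hive
  }
}
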
http://git-wip-us.apache.org/repos/asf/spark/blob/97594c29/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/ScriptTransformation.scala
----------------------------------------------------------------------
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/ScriptTransformation.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/ScriptTransformation.scala
index c553c03..1025b8f 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/ScriptTransformation.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/ScriptTransformation.scala
@@ -124,7 +124,7 @@ case class ScriptTransformation(
         } else {
           null
         }
-        val mutableRow = new SpecificMutableRow(output.map(_.dataType))
+        val mutableRow = new SpecificInternalRow(output.map(_.dataType))
 
         @transient
         lazy val unwrappers = outputSoi.getAllStructFieldRefs.asScala.map(unwrapperFor)

http://git-wip-us.apache.org/repos/asf/spark/blob/97594c29/sql/hive/src/main/scala/org/apache/spark/sql/hive/hiveUDFs.scala
----------------------------------------------------------------------
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/hiveUDFs.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/hiveUDFs.scala
index d549135..4203308 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/hiveUDFs.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/hiveUDFs.scala
@@ -329,17 +329,17 @@ private[hive] case class HiveUDAFFunction(
   // buffer for it.
   override def aggBufferSchema: StructType = StructType(Nil)
 
-  override def update(_buffer: MutableRow, input: InternalRow): Unit = {
+  override def update(_buffer: InternalRow, input: InternalRow): Unit = {
     val inputs = inputProjection(input)
     function.iterate(buffer, wrap(inputs, wrappers, cached, inputDataTypes))
   }
 
-  override def merge(buffer1: MutableRow, buffer2: InternalRow): Unit = {
+  override def merge(buffer1: InternalRow, buffer2: InternalRow): Unit = {
     throw new UnsupportedOperationException(
       "Hive UDAF doesn't support partial aggregate")
   }
 
-  override def initialize(_buffer: MutableRow): Unit = {
+  override def initialize(_buffer: InternalRow): Unit = {
     buffer = function.getNewAggregationBuffer
   }
 

http://git-wip-us.apache.org/repos/asf/spark/blob/97594c29/sql/hive/src/main/scala/org/apache/spark/sql/hive/orc/OrcFileFormat.scala
----------------------------------------------------------------------
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/orc/OrcFileFormat.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/orc/OrcFileFormat.scala
index 15b72d8..e94f49e 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/orc/OrcFileFormat.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/orc/OrcFileFormat.scala
@@ -281,7 +281,7 @@ private[orc] object OrcRelation extends HiveInspectors {
       maybeStructOI: Option[StructObjectInspector],
       iterator: Iterator[Writable]): Iterator[InternalRow] = {
     val deserializer = new OrcSerde
-    val mutableRow = new SpecificMutableRow(dataSchema.map(_.dataType))
+    val mutableRow = new SpecificInternalRow(dataSchema.map(_.dataType))
     val unsafeProjection = UnsafeProjection.create(dataSchema)
 
     def unwrap(oi: StructObjectInspector): Iterator[InternalRow] = {

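The same buffer-reuse pattern appears here: one SpecificInternalRow is allocated up front, refilled for each ORC record, and pushed through an UnsafeProjection. A rough sketch of that shape, assuming field values are already in Catalyst's internal representation (e.g. UTF8String for strings); the toUnsafeRows helper is hypothetical and not the method in this file.

import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions.{SpecificInternalRow, UnsafeProjection}
import org.apache.spark.sql.types.StructType

def toUnsafeRows(records: Iterator[Seq[Any]], schema: StructType): Iterator[InternalRow] = {
  // One mutable buffer row, reused for every record instead of allocating per row.
  val mutableRow = new SpecificInternalRow(schema.map(_.dataType))
  val unsafeProjection = UnsafeProjection.create(schema)
  records.map { fields =>
    var i = 0
    while (i < fields.length) {
      if (fields(i) == null) mutableRow.setNullAt(i) else mutableRow.update(i, fields(i))
      i += 1
    }
    // The projection returns a reused UnsafeRow; callers must copy if they retain it.
    unsafeProjection(mutableRow)
  }
}
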
