cloud-fan commented on a change in pull request #29199:
URL: https://github.com/apache/spark/pull/29199#discussion_r464214535
##########
File path:
sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/HiveScriptTransformationExec.scala
##########
@@ -172,66 +118,94 @@ case class HiveScriptTransformationExec(
if (!hasNext) {
throw new NoSuchElementException
}
- if (outputSerde == null) {
- val prevLine = curLine
- curLine = reader.readLine()
- if (!ioschema.schemaLess) {
- new GenericInternalRow(
-          prevLine.split(ioschema.outputRowFormatMap("TOK_TABLEROWFORMATFIELD"))
-            .map(CatalystTypeConverters.convertToCatalyst))
+ val raw = outputSerde.deserialize(scriptOutputWritable)
+ scriptOutputWritable = null
+ val dataList = outputSoi.getStructFieldsDataAsList(raw)
+ var i = 0
+ while (i < dataList.size()) {
+ if (dataList.get(i) == null) {
+ mutableRow.setNullAt(i)
} else {
- new GenericInternalRow(
-          prevLine.split(ioschema.outputRowFormatMap("TOK_TABLEROWFORMATFIELD"), 2)
-            .map(CatalystTypeConverters.convertToCatalyst))
+ unwrappers(i)(dataList.get(i), mutableRow, i)
}
- } else {
- val raw = outputSerde.deserialize(scriptOutputWritable)
- scriptOutputWritable = null
- val dataList = outputSoi.getStructFieldsDataAsList(raw)
- var i = 0
- while (i < dataList.size()) {
- if (dataList.get(i) == null) {
- mutableRow.setNullAt(i)
- } else {
- unwrappers(i)(dataList.get(i), mutableRow, i)
- }
- i += 1
- }
- mutableRow
+ i += 1
}
+ mutableRow
}
}
+ }
+
+ override def processIterator(
+ inputIterator: Iterator[InternalRow],
+ hadoopConf: Configuration): Iterator[InternalRow] = {
+
+ val (outputStream, proc, inputStream, stderrBuffer) = initProc
+
+    // This nullability is a performance optimization in order to avoid an Option.foreach() call
+    // inside of a loop
+    @Nullable val (inputSerde, inputSoi) = initInputSerDe(ioschema, input).getOrElse((null, null))
Review comment:
why do we use `@Nullable` for local variables?
##########
File path:
sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/HiveScriptTransformationExec.scala
##########
@@ -172,66 +118,94 @@ case class HiveScriptTransformationExec(
if (!hasNext) {
throw new NoSuchElementException
}
- if (outputSerde == null) {
- val prevLine = curLine
- curLine = reader.readLine()
- if (!ioschema.schemaLess) {
- new GenericInternalRow(
-          prevLine.split(ioschema.outputRowFormatMap("TOK_TABLEROWFORMATFIELD"))
-            .map(CatalystTypeConverters.convertToCatalyst))
+ val raw = outputSerde.deserialize(scriptOutputWritable)
+ scriptOutputWritable = null
+ val dataList = outputSoi.getStructFieldsDataAsList(raw)
+ var i = 0
+ while (i < dataList.size()) {
+ if (dataList.get(i) == null) {
+ mutableRow.setNullAt(i)
} else {
- new GenericInternalRow(
-          prevLine.split(ioschema.outputRowFormatMap("TOK_TABLEROWFORMATFIELD"), 2)
-            .map(CatalystTypeConverters.convertToCatalyst))
+ unwrappers(i)(dataList.get(i), mutableRow, i)
}
- } else {
- val raw = outputSerde.deserialize(scriptOutputWritable)
- scriptOutputWritable = null
- val dataList = outputSoi.getStructFieldsDataAsList(raw)
- var i = 0
- while (i < dataList.size()) {
- if (dataList.get(i) == null) {
- mutableRow.setNullAt(i)
- } else {
- unwrappers(i)(dataList.get(i), mutableRow, i)
- }
- i += 1
- }
- mutableRow
+ i += 1
}
+ mutableRow
}
}
+ }
+
+ override def processIterator(
+ inputIterator: Iterator[InternalRow],
+ hadoopConf: Configuration): Iterator[InternalRow] = {
+
+ val (outputStream, proc, inputStream, stderrBuffer) = initProc
+
+    // This nullability is a performance optimization in order to avoid an Option.foreach() call
+    // inside of a loop
+    @Nullable val (inputSerde, inputSoi) = initInputSerDe(ioschema, input).getOrElse((null, null))
+
+    // For HiveScriptTransformationExec, if inputSerde == null, but outputSerde != null
+    // We will use StringBuffer to pass data, in this case, we should cast data as string too.
+ val finalInput = if (inputSerde == null) {
+ inputExpressionsWithoutSerde
+ } else {
+ input
+ }
+
+ val outputProjection = new InterpretedProjection(finalInput, child.output)
+
+    // This new thread will consume the ScriptTransformation's input rows and write them to the
+    // external process. That process's output will be read by this current thread.
+ val writerThread = HiveScriptTransformationWriterThread(
+ inputIterator.map(outputProjection),
+ finalInput.map(_.dataType),
+ inputSerde,
+ inputSoi,
+ ioschema,
+ outputStream,
+ proc,
+ stderrBuffer,
+ TaskContext.get(),
+ hadoopConf
+ )
+
+    // This nullability is a performance optimization in order to avoid an Option.foreach() call
+    // inside of a loop
+ @Nullable val (outputSerde, outputSoi) = {
Review comment:
ditto
----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
For queries about this service, please contact Infrastructure at:
[email protected]
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]