Github user scwf commented on a diff in the pull request:

    https://github.com/apache/spark/pull/2685#discussion_r19256989
  
    --- Diff: sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/server/SparkSQLOperationManager.scala ---
    @@ -54,158 +45,8 @@ private[thriftserver] class SparkSQLOperationManager(hiveContext: HiveContext)
           confOverlay: JMap[String, String],
           async: Boolean): ExecuteStatementOperation = synchronized {
     
    -    val operation = new ExecuteStatementOperation(parentSession, statement, confOverlay) {
    -      private var result: SchemaRDD = _
    -      private var iter: Iterator[SparkRow] = _
    -      private var dataTypes: Array[DataType] = _
    -
    -      def close(): Unit = {
    -        // RDDs will be cleaned automatically upon garbage collection.
    -        logDebug("CLOSING")
    -      }
    -
    -      def getNextRowSet(order: FetchOrientation, maxRowsL: Long): RowSet = {
    -        if (!iter.hasNext) {
    -          new RowSet()
    -        } else {
    -          // maxRowsL here typically maps to java.sql.Statement.getFetchSize, which is an int
    -          val maxRows = maxRowsL.toInt
    -          var curRow = 0
    -          var rowSet = new ArrayBuffer[Row](maxRows.min(1024))
    -
    -          while (curRow < maxRows && iter.hasNext) {
    -            val sparkRow = iter.next()
    -            val row = new Row()
    -            var curCol = 0
    -
    -            while (curCol < sparkRow.length) {
    -              if (sparkRow.isNullAt(curCol)) {
    -                addNullColumnValue(sparkRow, row, curCol)
    -              } else {
    -                addNonNullColumnValue(sparkRow, row, curCol)
    -              }
    -              curCol += 1
    -            }
    -            rowSet += row
    -            curRow += 1
    -          }
    -          new RowSet(rowSet, 0)
    -        }
    -      }
    -
    -      def addNonNullColumnValue(from: SparkRow, to: Row, ordinal: Int) {
    -        dataTypes(ordinal) match {
    -          case StringType =>
    -            to.addString(from(ordinal).asInstanceOf[String])
    -          case IntegerType =>
    -            to.addColumnValue(ColumnValue.intValue(from.getInt(ordinal)))
    -          case BooleanType =>
    -            to.addColumnValue(ColumnValue.booleanValue(from.getBoolean(ordinal)))
    -          case DoubleType =>
    -            to.addColumnValue(ColumnValue.doubleValue(from.getDouble(ordinal)))
    -          case FloatType =>
    -            to.addColumnValue(ColumnValue.floatValue(from.getFloat(ordinal)))
    -          case DecimalType =>
    -            val hiveDecimal = from.get(ordinal).asInstanceOf[BigDecimal].bigDecimal
    -            to.addColumnValue(ColumnValue.stringValue(new HiveDecimal(hiveDecimal)))
    -          case LongType =>
    -            to.addColumnValue(ColumnValue.longValue(from.getLong(ordinal)))
    -          case ByteType =>
    -            to.addColumnValue(ColumnValue.byteValue(from.getByte(ordinal)))
    -          case ShortType =>
    -            to.addColumnValue(ColumnValue.shortValue(from.getShort(ordinal)))
    -          case TimestampType =>
    -            to.addColumnValue(
    -              ColumnValue.timestampValue(from.get(ordinal).asInstanceOf[Timestamp]))
    -          case BinaryType | _: ArrayType | _: StructType | _: MapType =>
    -            val hiveString = result
    -              .queryExecution
    -              .asInstanceOf[HiveContext#QueryExecution]
    -              .toHiveString((from.get(ordinal), dataTypes(ordinal)))
    -            to.addColumnValue(ColumnValue.stringValue(hiveString))
    -        }
    -      }
    -
    -      def addNullColumnValue(from: SparkRow, to: Row, ordinal: Int) {
    -        dataTypes(ordinal) match {
    -          case StringType =>
    -            to.addString(null)
    -          case IntegerType =>
    -            to.addColumnValue(ColumnValue.intValue(null))
    -          case BooleanType =>
    -            to.addColumnValue(ColumnValue.booleanValue(null))
    -          case DoubleType =>
    -            to.addColumnValue(ColumnValue.doubleValue(null))
    -          case FloatType =>
    -            to.addColumnValue(ColumnValue.floatValue(null))
    -          case DecimalType =>
    -            to.addColumnValue(ColumnValue.stringValue(null: HiveDecimal))
    -          case LongType =>
    -            to.addColumnValue(ColumnValue.longValue(null))
    -          case ByteType =>
    -            to.addColumnValue(ColumnValue.byteValue(null))
    -          case ShortType =>
    -            to.addColumnValue(ColumnValue.shortValue(null))
    -          case TimestampType =>
    -            to.addColumnValue(ColumnValue.timestampValue(null))
    -          case BinaryType | _: ArrayType | _: StructType | _: MapType =>
    -            to.addColumnValue(ColumnValue.stringValue(null: String))
    -        }
    -      }
    -
    -      def getResultSetSchema: TableSchema = {
    --- End diff --
    
    Actually, this is an overridden method, so we cannot move it outside the anonymous class.
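    
    A minimal, self-contained sketch of why that is (simplified, assumed signatures;
    the real classes come from the Hive service API):
    
        // getResultSetSchema overrides an abstract member, so it must be
        // defined inside the anonymous subclass itself.
        object OverrideSketch {
          class TableSchema(val columns: Seq[String])
    
          abstract class ExecuteStatementOperation(statement: String) {
            def getResultSetSchema: TableSchema // abstract: subclasses must implement it
            def close(): Unit
          }
    
          val operation = new ExecuteStatementOperation("SELECT 1") {
            // Must live here; a helper defined outside the `new ... {}` block
            // would leave the abstract member unimplemented.
            override def getResultSetSchema: TableSchema = new TableSchema(Seq("_c0"))
            override def close(): Unit = ()
          }
    
          def main(args: Array[String]): Unit =
            println(operation.getResultSetSchema.columns.mkString(", "))
        }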


