GitHub user srowen commented on a diff in the pull request:

    https://github.com/apache/spark/pull/22048#discussion_r213700228
  
    --- Diff: sql/core/src/main/scala/org/apache/spark/sql/Dataset.scala ---
    @@ -301,16 +301,16 @@ class Dataset[T] private[sql](
           // Compute the width of each column
           for (row <- rows) {
             for ((cell, i) <- row.zipWithIndex) {
    -          colWidths(i) = math.max(colWidths(i), cell.length)
    +          colWidths(i) = math.max(colWidths(i), Utils.stringHalfWidth(cell))
             }
           }
     
           val paddedRows = rows.map { row =>
             row.zipWithIndex.map { case (cell, i) =>
               if (truncate > 0) {
    -            StringUtils.leftPad(cell, colWidths(i))
    --- End diff ---
    
    Oh, why not use this method?
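
    For context, here is a minimal sketch of the kind of width calculation at
    play in this hunk (the helper below is an illustrative stand-in for
    Utils.stringHalfWidth; its regex ranges are an approximation, not Spark's
    exact code): counting each full-width character as two display cells keeps
    columns aligned when a cell mixes ASCII and CJK text.

        object HalfWidthSketch {
          // Illustrative stand-in for Utils.stringHalfWidth: full-width code
          // points take two terminal cells, so count them as 2 instead of 1.
          private val fullWidthRegex =
            ("[\u1100-\u115F\u2E80-\uA4CF\uAC00-\uD7A3\uF900-\uFAFF" +
             "\uFE30-\uFE4F\uFF00-\uFF60\uFFE0-\uFFE6]").r

          def stringHalfWidth(str: String): Int =
            if (str == null) 0 else str.length + fullWidthRegex.findAllIn(str).size

          def main(args: Array[String]): Unit = {
            println(stringHalfWidth("Spark"))   // 5: all half-width characters
            println(stringHalfWidth("スパーク"))  // 8: four full-width characters
          }
        }

    Presumably the reason StringUtils.leftPad(cell, colWidths(i)) is dropped in
    the same hunk is that leftPad pads by character count, so a cell containing
    full-width characters would still render wider than its column even when its
    length matches the computed width.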

