Github user jiangxb1987 commented on a diff in the pull request:

    https://github.com/apache/spark/pull/20601#discussion_r168103464
  
    --- Diff: core/src/main/scala/org/apache/spark/ui/jobs/StagePage.scala ---
    @@ -963,33 +965,60 @@ private[ui] class TaskPagedTable(
     
     private object ApiHelper {
     
    +  val HEADER_ID = "ID"
    +  val HEADER_TASK_INDEX = "Index"
    +  val HEADER_ATTEMPT = "Attempt"
    +  val HEADER_STATUS = "Status"
    +  val HEADER_LOCALITY = "Locality Level"
    +  val HEADER_EXECUTOR = "Executor ID"
    +  val HEADER_HOST = "Host"
    +  val HEADER_LAUNCH_TIME = "Launch Time"
    +  val HEADER_DURATION = "Duration"
    +  val HEADER_SCHEDULER_DELAY = "Scheduler Delay"
    +  val HEADER_DESER_TIME = "Task Deserialization Time"
    +  val HEADER_GC_TIME = "GC Time"
    +  val HEADER_SER_TIME = "Result Serialization Time"
    +  val HEADER_GETTING_RESULT_TIME = "Getting Result Time"
    +  val HEADER_PEAK_MEM = "Peak Execution Memory"
    +  val HEADER_ACCUMULATORS = "Accumulators"
    +  val HEADER_INPUT_SIZE = "Input Size / Records"
    +  val HEADER_OUTPUT_SIZE = "Output Size / Records"
    +  val HEADER_SHUFFLE_READ_TIME = "Shuffle Read Blocked Time"
    +  val HEADER_SHUFFLE_TOTAL_READS = "Shuffle Read Size / Records"
    +  val HEADER_SHUFFLE_REMOTE_READS = "Shuffle Remote Reads"
    +  val HEADER_SHUFFLE_WRITE_TIME = "Write Time"
    +  val HEADER_SHUFFLE_WRITE_SIZE = "Shuffle Write Size / Records"
    +  val HEADER_MEM_SPILL = "Shuffle Spill (Memory)"
    +  val HEADER_DISK_SPILL = "Shuffle Spill (Disk)"
    +  val HEADER_ERROR = "Errors"
     
       private val COLUMN_TO_INDEX = Map(
    -    "ID" -> null.asInstanceOf[String],
    -    "Index" -> TaskIndexNames.TASK_INDEX,
    -    "Attempt" -> TaskIndexNames.ATTEMPT,
    -    "Status" -> TaskIndexNames.STATUS,
    -    "Locality Level" -> TaskIndexNames.LOCALITY,
    -    "Executor ID / Host" -> TaskIndexNames.EXECUTOR,
    -    "Launch Time" -> TaskIndexNames.LAUNCH_TIME,
    -    "Duration" -> TaskIndexNames.DURATION,
    -    "Scheduler Delay" -> TaskIndexNames.SCHEDULER_DELAY,
    -    "Task Deserialization Time" -> TaskIndexNames.DESER_TIME,
    -    "GC Time" -> TaskIndexNames.GC_TIME,
    -    "Result Serialization Time" -> TaskIndexNames.SER_TIME,
    -    "Getting Result Time" -> TaskIndexNames.GETTING_RESULT_TIME,
    -    "Peak Execution Memory" -> TaskIndexNames.PEAK_MEM,
    -    "Accumulators" -> TaskIndexNames.ACCUMULATORS,
    -    "Input Size / Records" -> TaskIndexNames.INPUT_SIZE,
    -    "Output Size / Records" -> TaskIndexNames.OUTPUT_SIZE,
    -    "Shuffle Read Blocked Time" -> TaskIndexNames.SHUFFLE_READ_TIME,
    -    "Shuffle Read Size / Records" -> TaskIndexNames.SHUFFLE_TOTAL_READS,
    -    "Shuffle Remote Reads" -> TaskIndexNames.SHUFFLE_REMOTE_READS,
    -    "Write Time" -> TaskIndexNames.SHUFFLE_WRITE_TIME,
    -    "Shuffle Write Size / Records" -> TaskIndexNames.SHUFFLE_WRITE_SIZE,
    -    "Shuffle Spill (Memory)" -> TaskIndexNames.MEM_SPILL,
    -    "Shuffle Spill (Disk)" -> TaskIndexNames.DISK_SPILL,
    -    "Errors" -> TaskIndexNames.ERROR)
    +    HEADER_ID -> null.asInstanceOf[String],
    +    HEADER_TASK_INDEX -> TaskIndexNames.TASK_INDEX,
    +    HEADER_ATTEMPT -> TaskIndexNames.ATTEMPT,
    +    HEADER_STATUS -> TaskIndexNames.STATUS,
    +    HEADER_LOCALITY -> TaskIndexNames.LOCALITY,
    +    HEADER_EXECUTOR -> TaskIndexNames.EXECUTOR,
    +    HEADER_HOST -> TaskIndexNames.EXECUTOR,
    --- End diff --
    
    It seems we'd better add a new constant to `TaskIndexNames` for the host column, rather than reusing `TaskIndexNames.EXECUTOR` for both `HEADER_EXECUTOR` and `HEADER_HOST`.


---

---------------------------------------------------------------------
To unsubscribe, e-mail: reviews-unsubscr...@spark.apache.org
For additional commands, e-mail: reviews-h...@spark.apache.org

Reply via email to