This is an automated email from the ASF dual-hosted git repository.
ulyssesyou pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/incubator-gluten.git
The following commit(s) were added to refs/heads/main by this push:
     new 468000c43 [CORE] Rephrase metric names using "totaltime" as prefix (#6058)
468000c43 is described below
commit 468000c43efe7d54910431c7268768c4cb2d0410
Author: Hongze Zhang <[email protected]>
AuthorDate: Thu Jun 13 09:18:11 2024 +0800
[CORE] Rephrase metric names using "totaltime" as prefix (#6058)
---
.../backendsapi/clickhouse/CHMetricsApi.scala | 34 +++++-----
.../execution/CHGenerateExecTransformer.scala | 2 +-
.../gluten/backendsapi/velox/VeloxMetricsApi.scala | 76 ++++++++++------------
.../gluten/execution/GenerateExecTransformer.scala | 2 +-
4 files changed, 51 insertions(+), 63 deletions(-)
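
The diff below is mechanical: timing-metric descriptions that began with "totaltime" / "total time" are rephrased to start with "time". A minimal sketch of the pattern follows (the helper and metric names are illustrative, not lines from this commit; Spark's SQL UI already appends its own "total (min, med, max)" summary to timing metrics, which is presumably why the prefix is dropped):

    import org.apache.spark.SparkContext
    import org.apache.spark.sql.execution.metric.{SQLMetric, SQLMetrics}

    // Hypothetical helper, for illustration only.
    def exampleMetrics(sparkContext: SparkContext): Map[String, SQLMetric] = Map(
      // before: "totaltime of sort" -> shown as "totaltime of sort total (min, med, max)"
      // after:  "time of sort"      -> shown as "time of sort total (min, med, max)"
      "wallNanos" -> SQLMetrics.createNanoTimingMetric(sparkContext, "time of sort"),
      "totalTime" -> SQLMetrics.createTimingMetric(sparkContext, "time")
    )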
diff --git a/backends-clickhouse/src/main/scala/org/apache/gluten/backendsapi/clickhouse/CHMetricsApi.scala b/backends-clickhouse/src/main/scala/org/apache/gluten/backendsapi/clickhouse/CHMetricsApi.scala
index 350548e98..a5fb4a185 100644
--- a/backends-clickhouse/src/main/scala/org/apache/gluten/backendsapi/clickhouse/CHMetricsApi.scala
+++ b/backends-clickhouse/src/main/scala/org/apache/gluten/backendsapi/clickhouse/CHMetricsApi.scala
@@ -144,7 +144,7 @@ class CHMetricsApi extends MetricsApi with Logging with LogLevelUtil {
       "extraTime" -> SQLMetrics.createTimingMetric(sparkContext, "extra operators time"),
       "inputWaitTime" -> SQLMetrics.createTimingMetric(sparkContext, "time of waiting for data"),
       "outputWaitTime" -> SQLMetrics.createTimingMetric(sparkContext, "time of waiting for output"),
-      "totalTime" -> SQLMetrics.createTimingMetric(sparkContext, "total time")
+      "totalTime" -> SQLMetrics.createTimingMetric(sparkContext, "time")
     )

   override def genFilterTransformerMetricsUpdater(metrics: Map[String, SQLMetric]): MetricsUpdater =
@@ -160,7 +160,7 @@ class CHMetricsApi extends MetricsApi with Logging with LogLevelUtil {
       "extraTime" -> SQLMetrics.createTimingMetric(sparkContext, "extra operators time"),
       "inputWaitTime" -> SQLMetrics.createTimingMetric(sparkContext, "time of waiting for data"),
       "outputWaitTime" -> SQLMetrics.createTimingMetric(sparkContext, "time of waiting for output"),
-      "totalTime" -> SQLMetrics.createTimingMetric(sparkContext, "total time")
+      "totalTime" -> SQLMetrics.createTimingMetric(sparkContext, "time")
     )

   override def genProjectTransformerMetricsUpdater(
@@ -181,7 +181,7 @@ class CHMetricsApi extends MetricsApi with Logging with LogLevelUtil {
       "resizeOutputRows" -> SQLMetrics.createMetric(sparkContext, "number of resize output rows"),
       "aggregatingTime" ->
         SQLMetrics.createTimingMetric(sparkContext, "time of aggregating"),
-      "totalTime" -> SQLMetrics.createTimingMetric(sparkContext, "total time")
+      "totalTime" -> SQLMetrics.createTimingMetric(sparkContext, "time")
     )

   override def genHashAggregateTransformerMetricsUpdater(
@@ -198,7 +198,7 @@ class CHMetricsApi extends MetricsApi with Logging with LogLevelUtil {
       "extraTime" -> SQLMetrics.createTimingMetric(sparkContext, "extra operators time"),
       "inputWaitTime" -> SQLMetrics.createTimingMetric(sparkContext, "time of waiting for data"),
       "outputWaitTime" -> SQLMetrics.createTimingMetric(sparkContext, "time of waiting for output"),
-      "totalTime" -> SQLMetrics.createTimingMetric(sparkContext, "total time")
+      "totalTime" -> SQLMetrics.createTimingMetric(sparkContext, "time")
     )

   override def genExpandTransformerMetricsUpdater(metrics: Map[String, SQLMetric]): MetricsUpdater =
@@ -213,17 +213,15 @@ class CHMetricsApi extends MetricsApi with Logging with LogLevelUtil {
     Map(
       "dataSize" -> SQLMetrics.createSizeMetric(sparkContext, "data size"),
       "bytesSpilled" -> SQLMetrics.createSizeMetric(sparkContext, "shuffle bytes spilled"),
-      "computePidTime" -> SQLMetrics.createNanoTimingMetric(
-        sparkContext,
-        "totaltime to compute pid"),
-      "splitTime" -> SQLMetrics.createNanoTimingMetric(sparkContext, "totaltime to split"),
-      "IOTime" -> SQLMetrics.createNanoTimingMetric(sparkContext, "totaltime to disk io"),
+      "computePidTime" -> SQLMetrics.createNanoTimingMetric(sparkContext, "time to compute pid"),
+      "splitTime" -> SQLMetrics.createNanoTimingMetric(sparkContext, "time to split"),
+      "IOTime" -> SQLMetrics.createNanoTimingMetric(sparkContext, "time to disk io"),
       "serializeTime" -> SQLMetrics.createNanoTimingMetric(
         sparkContext,
-        "totaltime to block serialization"),
-      "spillTime" -> SQLMetrics.createNanoTimingMetric(sparkContext, "totaltime to spill"),
-      "compressTime" -> SQLMetrics.createNanoTimingMetric(sparkContext, "totaltime to compress"),
-      "prepareTime" -> SQLMetrics.createNanoTimingMetric(sparkContext, "totaltime to prepare"),
+        "time to block serialization"),
+      "spillTime" -> SQLMetrics.createNanoTimingMetric(sparkContext, "time to spill"),
+      "compressTime" -> SQLMetrics.createNanoTimingMetric(sparkContext, "time to compress"),
+      "prepareTime" -> SQLMetrics.createNanoTimingMetric(sparkContext, "time to prepare"),
       "avgReadBatchNumRows" -> SQLMetrics
         .createAverageMetric(sparkContext, "avg read batch num rows"),
       "numInputRows" -> SQLMetrics.createMetric(sparkContext, "number of input rows"),
@@ -245,7 +243,7 @@ class CHMetricsApi extends MetricsApi with Logging with LogLevelUtil {
       "extraTime" -> SQLMetrics.createTimingMetric(sparkContext, "extra operators time"),
       "inputWaitTime" -> SQLMetrics.createTimingMetric(sparkContext, "time of waiting for data"),
       "outputWaitTime" -> SQLMetrics.createTimingMetric(sparkContext, "time of waiting for output"),
-      "totalTime" -> SQLMetrics.createTimingMetric(sparkContext, "total time")
+      "totalTime" -> SQLMetrics.createTimingMetric(sparkContext, "time")
     )

   override def genWindowTransformerMetricsUpdater(metrics: Map[String, SQLMetric]): MetricsUpdater =
@@ -262,7 +260,7 @@ class CHMetricsApi extends MetricsApi with Logging with LogLevelUtil {
     Map(
       "numInputRows" -> SQLMetrics.createMetric(sparkContext, "number of input rows"),
       "numOutputBatches" -> SQLMetrics.createMetric(sparkContext, "number of output batches"),
-      "convertTime" -> SQLMetrics.createTimingMetric(sparkContext, "total time to convert")
+      "convertTime" -> SQLMetrics.createTimingMetric(sparkContext, "time to convert")
     )

   override def genLimitTransformerMetrics(sparkContext: SparkContext): Map[String, SQLMetric] =
@@ -275,7 +273,7 @@ class CHMetricsApi extends MetricsApi with Logging with LogLevelUtil {
       "extraTime" -> SQLMetrics.createTimingMetric(sparkContext, "extra operators time"),
       "inputWaitTime" -> SQLMetrics.createTimingMetric(sparkContext, "time of waiting for data"),
       "outputWaitTime" -> SQLMetrics.createTimingMetric(sparkContext, "time of waiting for output"),
-      "totalTime" -> SQLMetrics.createTimingMetric(sparkContext, "total time")
+      "totalTime" -> SQLMetrics.createTimingMetric(sparkContext, "time")
     )

   override def genLimitTransformerMetricsUpdater(metrics: Map[String, SQLMetric]): MetricsUpdater =
@@ -291,7 +289,7 @@ class CHMetricsApi extends MetricsApi with Logging with LogLevelUtil {
       "extraTime" -> SQLMetrics.createTimingMetric(sparkContext, "extra operators time"),
       "inputWaitTime" -> SQLMetrics.createTimingMetric(sparkContext, "time of waiting for data"),
       "outputWaitTime" -> SQLMetrics.createTimingMetric(sparkContext, "time of waiting for output"),
-      "totalTime" -> SQLMetrics.createTimingMetric(sparkContext, "total time")
+      "totalTime" -> SQLMetrics.createTimingMetric(sparkContext, "time")
     )

   override def genSortTransformerMetricsUpdater(metrics: Map[String, SQLMetric]): MetricsUpdater =
@@ -339,7 +337,7 @@ class CHMetricsApi extends MetricsApi with Logging with LogLevelUtil {
         SQLMetrics.createTimingMetric(sparkContext, "time of postProjection"),
       "probeTime" ->
         SQLMetrics.createTimingMetric(sparkContext, "time of probe"),
-      "totalTime" -> SQLMetrics.createTimingMetric(sparkContext, "total time"),
+      "totalTime" -> SQLMetrics.createTimingMetric(sparkContext, "time"),
       "fillingRightJoinSideTime" -> SQLMetrics.createTimingMetric(
         sparkContext,
         "filling right join side time"),
diff --git a/backends-clickhouse/src/main/scala/org/apache/gluten/execution/CHGenerateExecTransformer.scala b/backends-clickhouse/src/main/scala/org/apache/gluten/execution/CHGenerateExecTransformer.scala
index f1e7d305a..733c0a472 100644
--- a/backends-clickhouse/src/main/scala/org/apache/gluten/execution/CHGenerateExecTransformer.scala
+++ b/backends-clickhouse/src/main/scala/org/apache/gluten/execution/CHGenerateExecTransformer.scala
@@ -54,7 +54,7 @@ case class CHGenerateExecTransformer(
     "extraTime" -> SQLMetrics.createTimingMetric(sparkContext, "extra operators time"),
     "inputWaitTime" -> SQLMetrics.createTimingMetric(sparkContext, "time of waiting for data"),
     "outputWaitTime" -> SQLMetrics.createTimingMetric(sparkContext, "time of waiting for output"),
-    "totalTime" -> SQLMetrics.createTimingMetric(sparkContext, "total time")
+    "totalTime" -> SQLMetrics.createTimingMetric(sparkContext, "time")
   )

   override def metricsUpdater(): MetricsUpdater = new GenerateMetricsUpdater(metrics)
diff --git a/backends-velox/src/main/scala/org/apache/gluten/backendsapi/velox/VeloxMetricsApi.scala b/backends-velox/src/main/scala/org/apache/gluten/backendsapi/velox/VeloxMetricsApi.scala
index 0811d71d1..c2696de50 100644
--- a/backends-velox/src/main/scala/org/apache/gluten/backendsapi/velox/VeloxMetricsApi.scala
+++ b/backends-velox/src/main/scala/org/apache/gluten/backendsapi/velox/VeloxMetricsApi.scala
@@ -41,7 +41,7 @@ class VeloxMetricsApi extends MetricsApi with Logging {
       sparkContext: SparkContext): Map[String, SQLMetric] = {
     Map(
       "cpuCount" -> SQLMetrics.createMetric(sparkContext, "cpu wall time count"),
-      "wallNanos" -> SQLMetrics.createNanoTimingMetric(sparkContext, "totaltime of input iterator"),
+      "wallNanos" -> SQLMetrics.createNanoTimingMetric(sparkContext, "time of input iterator"),
       "numOutputRows" -> SQLMetrics.createMetric(sparkContext, "number of output rows"),
       "outputVectors" -> SQLMetrics.createMetric(sparkContext, "number of output vectors")
     )
@@ -62,7 +62,7 @@ class VeloxMetricsApi extends MetricsApi with Logging {
       "numOutputRows" -> SQLMetrics.createMetric(sparkContext, "number of output rows"),
       "outputVectors" -> SQLMetrics.createMetric(sparkContext, "number of output vectors"),
       "outputBytes" -> SQLMetrics.createSizeMetric(sparkContext, "number of output bytes"),
-      "wallNanos" -> SQLMetrics.createNanoTimingMetric(sparkContext, "totaltime of batch scan"),
+      "wallNanos" -> SQLMetrics.createNanoTimingMetric(sparkContext, "time of batch scan"),
       "cpuCount" -> SQLMetrics.createMetric(sparkContext, "cpu wall time count"),
       "scanTime" -> SQLMetrics.createNanoTimingMetric(sparkContext, "scan time"),
       "peakMemoryBytes" -> SQLMetrics.createSizeMetric(sparkContext, "peak memory bytes"),
@@ -93,10 +93,8 @@ class VeloxMetricsApi extends MetricsApi with Logging {
       "rawInputBytes" -> SQLMetrics.createSizeMetric(sparkContext, "number of raw input bytes"),
       "outputVectors" -> SQLMetrics.createMetric(sparkContext, "number of output vectors"),
       "outputBytes" -> SQLMetrics.createSizeMetric(sparkContext, "number of output bytes"),
-      "scanTime" -> SQLMetrics.createNanoTimingMetric(sparkContext, "totaltime of scan"),
-      "wallNanos" -> SQLMetrics.createNanoTimingMetric(
-        sparkContext,
-        "totaltime of scan and filter"),
+      "scanTime" -> SQLMetrics.createNanoTimingMetric(sparkContext, "time of scan"),
+      "wallNanos" -> SQLMetrics.createNanoTimingMetric(sparkContext, "time of scan and filter"),
       "cpuCount" -> SQLMetrics.createMetric(sparkContext, "cpu wall time count"),
       "peakMemoryBytes" -> SQLMetrics.createSizeMetric(sparkContext, "peak memory bytes"),
       "numFiles" -> SQLMetrics.createMetric(sparkContext, "number of files read"),
@@ -133,10 +131,8 @@ class VeloxMetricsApi extends MetricsApi with Logging {
       "rawInputBytes" -> SQLMetrics.createSizeMetric(sparkContext, "number of raw input bytes"),
       "outputVectors" -> SQLMetrics.createMetric(sparkContext, "number of output vectors"),
       "outputBytes" -> SQLMetrics.createSizeMetric(sparkContext, "number of output bytes"),
-      "scanTime" -> SQLMetrics.createNanoTimingMetric(sparkContext, "totaltime of scan"),
-      "wallNanos" -> SQLMetrics.createNanoTimingMetric(
-        sparkContext,
-        "totaltime of scan and filter"),
+      "scanTime" -> SQLMetrics.createNanoTimingMetric(sparkContext, "time of scan"),
+      "wallNanos" -> SQLMetrics.createNanoTimingMetric(sparkContext, "time of scan and filter"),
       "cpuCount" -> SQLMetrics.createMetric(sparkContext, "cpu wall time count"),
       "peakMemoryBytes" -> SQLMetrics.createSizeMetric(sparkContext, "peak memory bytes"),
       "numFiles" -> SQLMetrics.createMetric(sparkContext, "number of files read"),
@@ -171,7 +167,7 @@ class VeloxMetricsApi extends MetricsApi with Logging {
       "numOutputRows" -> SQLMetrics.createMetric(sparkContext, "number of output rows"),
       "outputVectors" -> SQLMetrics.createMetric(sparkContext, "number of output vectors"),
       "outputBytes" -> SQLMetrics.createSizeMetric(sparkContext, "number of output bytes"),
-      "wallNanos" -> SQLMetrics.createNanoTimingMetric(sparkContext, "totaltime of filter"),
+      "wallNanos" -> SQLMetrics.createNanoTimingMetric(sparkContext, "time of filter"),
       "cpuCount" -> SQLMetrics.createMetric(sparkContext, "cpu wall time count"),
       "peakMemoryBytes" -> SQLMetrics.createSizeMetric(sparkContext, "peak memory bytes"),
       "numMemoryAllocations" -> SQLMetrics.createMetric(
@@ -187,7 +183,7 @@ class VeloxMetricsApi extends MetricsApi with Logging {
       "numOutputRows" -> SQLMetrics.createMetric(sparkContext, "number of output rows"),
       "outputVectors" -> SQLMetrics.createMetric(sparkContext, "number of output vectors"),
       "outputBytes" -> SQLMetrics.createSizeMetric(sparkContext, "number of output bytes"),
-      "wallNanos" -> SQLMetrics.createNanoTimingMetric(sparkContext, "totaltime of project"),
+      "wallNanos" -> SQLMetrics.createNanoTimingMetric(sparkContext, "time of project"),
       "cpuCount" -> SQLMetrics.createMetric(sparkContext, "cpu wall time count"),
       "peakMemoryBytes" -> SQLMetrics.createSizeMetric(sparkContext, "peak memory bytes"),
       "numMemoryAllocations" -> SQLMetrics.createMetric(
@@ -205,7 +201,7 @@ class VeloxMetricsApi extends MetricsApi with Logging {
       "aggOutputVectors" -> SQLMetrics.createMetric(sparkContext, "number of output vectors"),
       "aggOutputBytes" -> SQLMetrics.createSizeMetric(sparkContext, "number of output bytes"),
       "aggCpuCount" -> SQLMetrics.createMetric(sparkContext, "cpu wall time count"),
-      "aggWallNanos" -> SQLMetrics.createNanoTimingMetric(sparkContext, "totaltime of aggregation"),
+      "aggWallNanos" -> SQLMetrics.createNanoTimingMetric(sparkContext, "time of aggregation"),
       "aggPeakMemoryBytes" -> SQLMetrics.createSizeMetric(sparkContext, "peak memory bytes"),
       "aggNumMemoryAllocations" -> SQLMetrics.createMetric(
         sparkContext,
@@ -222,13 +218,13 @@ class VeloxMetricsApi extends MetricsApi with Logging {
         "rowConstruction cpu wall time count"),
       "rowConstructionWallNanos" -> SQLMetrics.createNanoTimingMetric(
         sparkContext,
-        "totaltime of rowConstruction"),
+        "time of rowConstruction"),
       "extractionCpuCount" -> SQLMetrics.createMetric(
         sparkContext,
         "extraction cpu wall time count"),
       "extractionWallNanos" -> SQLMetrics.createNanoTimingMetric(
         sparkContext,
-        "totaltime of extraction"),
+        "time of extraction"),
       "finalOutputRows" -> SQLMetrics.createMetric(sparkContext, "number of final output rows"),
       "finalOutputVectors" -> SQLMetrics.createMetric(
         sparkContext,
@@ -244,7 +240,7 @@ class VeloxMetricsApi extends MetricsApi with Logging {
       "numOutputRows" -> SQLMetrics.createMetric(sparkContext, "number of output rows"),
       "outputVectors" -> SQLMetrics.createMetric(sparkContext, "number of output vectors"),
       "outputBytes" -> SQLMetrics.createSizeMetric(sparkContext, "number of output bytes"),
-      "wallNanos" -> SQLMetrics.createNanoTimingMetric(sparkContext, "totaltime of expand"),
+      "wallNanos" -> SQLMetrics.createNanoTimingMetric(sparkContext, "time of expand"),
       "cpuCount" -> SQLMetrics.createMetric(sparkContext, "cpu wall time count"),
       "peakMemoryBytes" -> SQLMetrics.createSizeMetric(sparkContext, "peak memory bytes"),
       "numMemoryAllocations" -> SQLMetrics.createMetric(
@@ -265,10 +261,10 @@ class VeloxMetricsApi extends MetricsApi with Logging {
       "dataSize" -> SQLMetrics.createSizeMetric(sparkContext, "data size"),
       "numPartitions" -> SQLMetrics.createMetric(sparkContext, "number of partitions"),
       "bytesSpilled" -> SQLMetrics.createSizeMetric(sparkContext, "shuffle bytes spilled"),
-      "splitBufferSize" -> SQLMetrics.createSizeMetric(sparkContext, "split buffer size total"),
-      "splitTime" -> SQLMetrics.createNanoTimingMetric(sparkContext, "totaltime to split"),
-      "spillTime" -> SQLMetrics.createNanoTimingMetric(sparkContext, "totaltime to spill"),
-      "deserializeTime" -> SQLMetrics.createNanoTimingMetric(sparkContext, "totaltime deserialize"),
+      "splitBufferSize" -> SQLMetrics.createSizeMetric(sparkContext, "split buffer size"),
+      "splitTime" -> SQLMetrics.createNanoTimingMetric(sparkContext, "time to split"),
+      "spillTime" -> SQLMetrics.createNanoTimingMetric(sparkContext, "time to spill"),
+      "deserializeTime" -> SQLMetrics.createNanoTimingMetric(sparkContext, "time to deserialize"),
       "avgReadBatchNumRows" -> SQLMetrics
         .createAverageMetric(sparkContext, "avg read batch num rows"),
       "numInputRows" -> SQLMetrics.createMetric(sparkContext, "number of input rows"),
@@ -281,8 +277,8 @@ class VeloxMetricsApi extends MetricsApi with Logging {
       baseMetrics
     } else {
       baseMetrics ++ Map(
-        "compressTime" -> SQLMetrics.createNanoTimingMetric(sparkContext, "totaltime to compress"),
-        "decompressTime" -> SQLMetrics.createNanoTimingMetric(sparkContext, "totaltime decompress")
+        "compressTime" -> SQLMetrics.createNanoTimingMetric(sparkContext, "time to compress"),
+        "decompressTime" -> SQLMetrics.createNanoTimingMetric(sparkContext, "time to decompress")
       )
     }
   }
@@ -292,7 +288,7 @@ class VeloxMetricsApi extends MetricsApi with Logging {
       "numOutputRows" -> SQLMetrics.createMetric(sparkContext, "number of output rows"),
       "outputVectors" -> SQLMetrics.createMetric(sparkContext, "number of output vectors"),
       "outputBytes" -> SQLMetrics.createSizeMetric(sparkContext, "number of output bytes"),
-      "wallNanos" -> SQLMetrics.createNanoTimingMetric(sparkContext, "totaltime of window"),
+      "wallNanos" -> SQLMetrics.createNanoTimingMetric(sparkContext, "time of window"),
       "cpuCount" -> SQLMetrics.createMetric(sparkContext, "cpu wall time count"),
       "peakMemoryBytes" -> SQLMetrics.createSizeMetric(sparkContext, "peak memory bytes"),
       "numMemoryAllocations" -> SQLMetrics.createMetric(
@@ -314,7 +310,7 @@ class VeloxMetricsApi extends MetricsApi with Logging {
     Map(
       "numInputRows" -> SQLMetrics.createMetric(sparkContext, "number of input rows"),
       "numOutputBatches" -> SQLMetrics.createMetric(sparkContext, "number of output batches"),
-      "convertTime" -> SQLMetrics.createTimingMetric(sparkContext, "totaltime to convert")
+      "convertTime" -> SQLMetrics.createTimingMetric(sparkContext, "time to convert")
     )

   override def genLimitTransformerMetrics(sparkContext: SparkContext): Map[String, SQLMetric] =
@@ -322,7 +318,7 @@ class VeloxMetricsApi extends MetricsApi with Logging {
       "numOutputRows" -> SQLMetrics.createMetric(sparkContext, "number of output rows"),
       "outputVectors" -> SQLMetrics.createMetric(sparkContext, "number of output vectors"),
       "outputBytes" -> SQLMetrics.createSizeMetric(sparkContext, "number of output bytes"),
-      "wallNanos" -> SQLMetrics.createNanoTimingMetric(sparkContext, "totaltime of limit"),
+      "wallNanos" -> SQLMetrics.createNanoTimingMetric(sparkContext, "time of limit"),
       "cpuCount" -> SQLMetrics.createMetric(sparkContext, "cpu wall time count"),
       "peakMemoryBytes" -> SQLMetrics.createSizeMetric(sparkContext, "peak memory bytes"),
       "numMemoryAllocations" -> SQLMetrics.createMetric(
@@ -347,7 +343,7 @@ class VeloxMetricsApi extends MetricsApi with Logging {
       "numOutputRows" -> SQLMetrics.createMetric(sparkContext, "number of output rows"),
       "outputVectors" -> SQLMetrics.createMetric(sparkContext, "number of output vectors"),
       "outputBytes" -> SQLMetrics.createSizeMetric(sparkContext, "number of output bytes"),
-      "wallNanos" -> SQLMetrics.createNanoTimingMetric(sparkContext, "totaltime of sort"),
+      "wallNanos" -> SQLMetrics.createNanoTimingMetric(sparkContext, "time of sort"),
       "cpuCount" -> SQLMetrics.createMetric(sparkContext, "cpu wall time count"),
       "peakMemoryBytes" -> SQLMetrics.createSizeMetric(sparkContext, "peak memory bytes"),
       "numMemoryAllocations" -> SQLMetrics.createMetric(
@@ -370,7 +366,7 @@ class VeloxMetricsApi extends MetricsApi with Logging {
       "numOutputRows" -> SQLMetrics.createMetric(sparkContext, "number of output rows"),
       "numOutputVectors" -> SQLMetrics.createMetric(sparkContext, "number of output vectors"),
       "numOutputBytes" -> SQLMetrics.createSizeMetric(sparkContext, "number of output bytes"),
-      "wallNanos" -> SQLMetrics.createNanoTimingMetric(sparkContext, "totaltime of merge join"),
+      "wallNanos" -> SQLMetrics.createNanoTimingMetric(sparkContext, "time of merge join"),
       "cpuCount" -> SQLMetrics.createMetric(sparkContext, "cpu wall time count"),
       "peakMemoryBytes" -> SQLMetrics.createSizeMetric(sparkContext, "peak memory bytes"),
       "numMemoryAllocations" -> SQLMetrics.createMetric(
@@ -381,19 +377,19 @@ class VeloxMetricsApi extends MetricsApi with Logging {
         "stream preProject cpu wall time count"),
       "streamPreProjectionWallNanos" -> SQLMetrics.createNanoTimingMetric(
         sparkContext,
-        "totaltime of stream preProjection"),
+        "time of stream preProjection"),
       "bufferPreProjectionCpuCount" -> SQLMetrics.createMetric(
         sparkContext,
         "buffer preProject cpu wall time count"),
       "bufferPreProjectionWallNanos" -> SQLMetrics.createNanoTimingMetric(
         sparkContext,
-        "totaltime of buffer preProjection"),
+        "time of buffer preProjection"),
       "postProjectionCpuCount" -> SQLMetrics.createMetric(
         sparkContext,
         "postProject cpu wall time count"),
       "postProjectionWallNanos" -> SQLMetrics.createNanoTimingMetric(
         sparkContext,
-        "totaltime of postProjection")
+        "time of postProjection")
     )

   override def genSortMergeJoinTransformerMetricsUpdater(
@@ -433,9 +429,7 @@ class VeloxMetricsApi extends MetricsApi with Logging {
       "hashBuildCpuCount" -> SQLMetrics.createMetric(
         sparkContext,
         "hash build cpu wall time count"),
-      "hashBuildWallNanos" -> SQLMetrics.createNanoTimingMetric(
-        sparkContext,
-        "totaltime of hash build"),
+      "hashBuildWallNanos" -> SQLMetrics.createNanoTimingMetric(sparkContext, "time of hash build"),
       "hashBuildPeakMemoryBytes" -> SQLMetrics.createSizeMetric(
         sparkContext,
         "hash build peak memory bytes"),
@@ -469,9 +463,7 @@ class VeloxMetricsApi extends MetricsApi with Logging {
       "hashProbeCpuCount" -> SQLMetrics.createMetric(
         sparkContext,
         "hash probe cpu wall time count"),
-      "hashProbeWallNanos" -> SQLMetrics.createNanoTimingMetric(
-        sparkContext,
-        "totaltime of hash probe"),
+      "hashProbeWallNanos" -> SQLMetrics.createNanoTimingMetric(sparkContext, "time of hash probe"),
       "hashProbePeakMemoryBytes" -> SQLMetrics.createSizeMetric(
         sparkContext,
         "hash probe peak memory bytes"),
@@ -501,19 +493,19 @@ class VeloxMetricsApi extends MetricsApi with Logging {
         "stream preProject cpu wall time count"),
       "streamPreProjectionWallNanos" -> SQLMetrics.createNanoTimingMetric(
         sparkContext,
-        "totaltime of stream preProjection"),
+        "time of stream preProjection"),
       "buildPreProjectionCpuCount" -> SQLMetrics.createMetric(
         sparkContext,
         "preProject cpu wall time count"),
       "buildPreProjectionWallNanos" -> SQLMetrics.createNanoTimingMetric(
         sparkContext,
-        "totaltime to build preProjection"),
+        "time to build preProjection"),
       "postProjectionCpuCount" -> SQLMetrics.createMetric(
         sparkContext,
         "postProject cpu wall time count"),
       "postProjectionWallNanos" -> SQLMetrics.createNanoTimingMetric(
         sparkContext,
-        "totaltime of postProjection"),
+        "time of postProjection"),
       "numOutputRows" -> SQLMetrics.createMetric(sparkContext, "number of output rows"),
       "numOutputVectors" -> SQLMetrics.createMetric(sparkContext, "number of output vectors"),
       "numOutputBytes" -> SQLMetrics.createSizeMetric(sparkContext, "number of output bytes")
@@ -528,9 +520,7 @@ class VeloxMetricsApi extends MetricsApi with Logging {
       "numOutputRows" -> SQLMetrics.createMetric(sparkContext, "number of output rows"),
       "outputVectors" -> SQLMetrics.createMetric(sparkContext, "number of output vectors"),
       "outputBytes" -> SQLMetrics.createSizeMetric(sparkContext, "number of output bytes"),
-      "wallNanos" -> SQLMetrics.createNanoTimingMetric(
-        sparkContext,
-        "total time of NestedLoopJoin"),
+      "wallNanos" -> SQLMetrics.createNanoTimingMetric(sparkContext, "time of NestedLoopJoin"),
       "cpuCount" -> SQLMetrics.createMetric(sparkContext, "cpu wall time count"),
       "peakMemoryBytes" -> SQLMetrics.createSizeMetric(sparkContext, "peak memory bytes"),
       "numMemoryAllocations" -> SQLMetrics.createMetric(
@@ -546,7 +536,7 @@ class VeloxMetricsApi extends MetricsApi with Logging {
       "numOutputRows" -> SQLMetrics.createMetric(sparkContext, "number of output rows"),
       "outputVectors" -> SQLMetrics.createMetric(sparkContext, "number of output vectors"),
       "outputBytes" -> SQLMetrics.createSizeMetric(sparkContext, "number of output bytes"),
-      "wallNanos" -> SQLMetrics.createNanoTimingMetric(sparkContext, "totaltime of sample"),
+      "wallNanos" -> SQLMetrics.createNanoTimingMetric(sparkContext, "time of sample"),
       "cpuCount" -> SQLMetrics.createMetric(sparkContext, "cpu wall time count"),
       "peakMemoryBytes" -> SQLMetrics.createSizeMetric(sparkContext, "peak memory bytes"),
       "numMemoryAllocations" -> SQLMetrics.createMetric(
diff --git a/backends-velox/src/main/scala/org/apache/gluten/execution/GenerateExecTransformer.scala b/backends-velox/src/main/scala/org/apache/gluten/execution/GenerateExecTransformer.scala
index c9b0abd6f..8ceea8c14 100644
--- a/backends-velox/src/main/scala/org/apache/gluten/execution/GenerateExecTransformer.scala
+++ b/backends-velox/src/main/scala/org/apache/gluten/execution/GenerateExecTransformer.scala
@@ -55,7 +55,7 @@ case class GenerateExecTransformer(
     "numOutputRows" -> SQLMetrics.createMetric(sparkContext, "number of output rows"),
     "numOutputVectors" -> SQLMetrics.createMetric(sparkContext, "number of output vectors"),
     "numOutputBytes" -> SQLMetrics.createSizeMetric(sparkContext, "number of output bytes"),
-    "wallNanos" -> SQLMetrics.createNanoTimingMetric(sparkContext, "totaltime of generate"),
+    "wallNanos" -> SQLMetrics.createNanoTimingMetric(sparkContext, "time of generate"),
     "cpuCount" -> SQLMetrics.createMetric(sparkContext, "cpu wall time count"),
     "peakMemoryBytes" -> SQLMetrics.createSizeMetric(sparkContext, "peak memory bytes"),
     "numMemoryAllocations" -> SQLMetrics.createMetric(
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]