[ 
https://issues.apache.org/jira/browse/KYLIN-4857?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=17263108#comment-17263108
 ] 

ASF GitHub Bot commented on KYLIN-4857:
---------------------------------------

zzcclp commented on a change in pull request #1538:
URL: https://github.com/apache/kylin/pull/1538#discussion_r555541622



##########
File path: 
core-metrics/src/main/java/org/apache/kylin/metrics/QuerySparkMetrics.java
##########
@@ -0,0 +1,398 @@
+package org.apache.kylin.metrics;
+
+import org.apache.kylin.shaded.com.google.common.cache.CacheBuilder;
+import org.apache.kylin.shaded.com.google.common.cache.RemovalListener;
+import org.apache.kylin.shaded.com.google.common.cache.RemovalNotification;
+import org.apache.kylin.shaded.com.google.common.collect.Maps;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.Serializable;
+import java.util.Map;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.TimeUnit;
+
+public class QuerySparkMetrics {
+    private static final Logger logger = 
LoggerFactory.getLogger(QuerySparkMetrics.class);
+    private static final QuerySparkMetrics instance = new QuerySparkMetrics();
+    private org.apache.kylin.shaded.com.google.common.cache.Cache<String, 
QueryExecutionMetrics> queryExecutionMetricsMap;
+
+    private QuerySparkMetrics() {
+        queryExecutionMetricsMap = CacheBuilder.newBuilder()
+                .maximumSize(1000)
+                .expireAfterWrite(600, TimeUnit.SECONDS)
+                .removalListener(new RemovalListener<String, 
QueryExecutionMetrics>() {
+                    @Override
+                    public void onRemoval(RemovalNotification<String, 
QueryExecutionMetrics> notification) {
+                        logger.debug("Query metrics {} is removed due to {}",
+                                notification.getKey(), 
notification.getCause());
+                    }
+                }).build();
+
+    }
+
+    public static QuerySparkMetrics getInstance() {
+        return instance;
+    }
+
+    public void onJobStart(String queryId, String sparderName, long 
executionId,
+                           long executionStartTime, int jobId, long 
jobStartTime) {
+        QueryExecutionMetrics queryExecutionMetrics = 
queryExecutionMetricsMap.asMap().get(queryId);
+        if (queryExecutionMetrics == null) {
+            queryExecutionMetrics = new QueryExecutionMetrics();
+            ConcurrentMap<Integer, SparkJobMetrics> sparkJobMetricsMap = 
Maps.newConcurrentMap();
+            queryExecutionMetrics.setQueryId(queryId);
+            queryExecutionMetrics.setSparderName(sparderName);
+            queryExecutionMetrics.setExecutionId(executionId);
+            queryExecutionMetrics.setStartTime(executionStartTime);
+            queryExecutionMetrics.setSparkJobMetricsMap(sparkJobMetricsMap);
+            queryExecutionMetricsMap.put(queryId, queryExecutionMetrics);
+        }
+        SparkJobMetrics sparkJobMetrics = new SparkJobMetrics();
+        sparkJobMetrics.setExecutionId(executionId);
+        sparkJobMetrics.setJobId(jobId);
+        sparkJobMetrics.setStartTime(jobStartTime);
+
+        ConcurrentMap<Integer, SparkStageMetrics> sparkStageMetricsMap = 
Maps.newConcurrentMap();
+        sparkJobMetrics.setSparkStageMetricsMap(sparkStageMetricsMap);
+
+        queryExecutionMetrics.getSparkJobMetricsMap().put(jobId, 
sparkJobMetrics);
+
+    }
+
+    public void onSparkStageStart(String queryId, int jobId, int stageId, 
String stageType, long startTime) {
+        SparkStageMetrics sparkStageMetrics = new SparkStageMetrics();
+        sparkStageMetrics.setStageId(stageId);
+        sparkStageMetrics.setStageType(stageType);
+        sparkStageMetrics.setStartTime(startTime);
+        
queryExecutionMetricsMap.asMap().get(queryId).getSparkJobMetricsMap().get(jobId).getSparkStageMetricsMap().put(stageId,
 sparkStageMetrics);
+
+    }
+
+    public void updateSparkStageMetrics(String queryId, int jobId, int 
stageId, boolean isSuccess, long resultSize,
+                                     long executorDeserializeTime, long 
executorDeserializeCpuTime, long executorRunTime,
+                                     long executorCpuTime, long jvmGCTime, 
long resultSerializationTime, long memoryBytesSpilled,
+                                     long diskBytesSpilled, long 
peakExecutionMemory) {

Review comment:
       There are too many parameters (more than 12); consider using a parameter 
class to encapsulate them.




----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
[email protected]


> Refactor system cube for kylin4
> -------------------------------
>
>                 Key: KYLIN-4857
>                 URL: https://issues.apache.org/jira/browse/KYLIN-4857
>             Project: Kylin
>          Issue Type: Improvement
>          Components: Metrics
>    Affects Versions: v4.0.0-alpha
>            Reporter: Yaqian Zhang
>            Assignee: Yaqian Zhang
>            Priority: Minor
>             Fix For: v4.0.0-beta
>
>
> With the change of query engine and storage, the query-related metrics 
> collected by the system cube in Kylin 3 are no longer applicable to Kylin 4, so 
> they need to be refactored to adapt to the Spark query engine in Kylin 4.



--
This message was sent by Atlassian Jira
(v8.3.4#803005)

Reply via email to