tvalentyn commented on code in PR #23931:
URL: https://github.com/apache/beam/pull/23931#discussion_r1046964357


##########
sdks/python/apache_beam/testing/analyzers/perf_analysis_utils.py:
##########
@@ -137,18 +142,26 @@ def fetch_metric_data(
       LIMIT {constants._NUM_DATA_POINTS_TO_RUN_CHANGE_POINT_ANALYSIS}
     """
   metric_data: pd.DataFrame = big_query_metrics_fetcher.fetch(query=query)
+  metric_data.sort_values(
+      by=[load_test_metrics_utils.SUBMIT_TIMESTAMP_LABEL], inplace=True)
   return (
-      metric_data[load_test_metrics_utils.VALUE_LABEL],
-      metric_data[load_test_metrics_utils.SUBMIT_TIMESTAMP_LABEL])
+      metric_data[load_test_metrics_utils.VALUE_LABEL].tolist(),
+      metric_data[load_test_metrics_utils.SUBMIT_TIMESTAMP_LABEL].tolist())
 
 
 def find_latest_change_point_index(metric_values: List[Union[float, int]]):
+  """
+  Args:
+   metric_values: Metric values used to run change point analysis.
+  Returns:
+   int: Rightmost change point index observed on metric_values.
+  """
   change_points_idx = e_divisive(metric_values)
   if not change_points_idx:
     return None
   # Consider the latest change point.
-  change_points_idx.sort(reverse=True)
-  change_point_index = change_points_idx[0]
+  change_points_idx.sort()
+  change_point_index = change_points_idx[-1]

Review Comment:
   nit: could also `return change_points_idx[-1]`.
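
   for illustration, the tail of `find_latest_change_point_index` could then read as below (a minimal sketch based on the diff above, assuming `e_divisive` is already imported in this module):

```python
def find_latest_change_point_index(metric_values):
  """Returns the right-most change point index in metric_values, or None."""
  change_points_idx = e_divisive(metric_values)
  if not change_points_idx:
    return None
  # After sorting ascending, the last index is the latest change point.
  change_points_idx.sort()
  return change_points_idx[-1]
```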



##########
sdks/python/apache_beam/testing/analyzers/github_issues_utils.py:
##########
@@ -44,11 +44,16 @@
     "X-GitHub-Api-Version": "2022-11-28"
 }
 
-# Fill the GitHub issue description with the below variables.
-_ISSUE_DESCRIPTION_HEADER = """
-  Affected metric: `{}`
+_ISSUE_TITLE_TEMPLATE = """
+  Performance Regression or Improvement: {}:{}
 """
-_METRIC_INFO = "timestamp: {}, metric_value: `{}`"
+
+# TODO: Provide a better debugging tool for the user to visualize metrics.
+# For example, a link to dashboards or a query to get recent data to analyze, etc.

Review Comment:
   we could add a link to a playbook initially.



##########
sdks/python/apache_beam/testing/analyzers/README.md:
##########
@@ -0,0 +1,132 @@
+<!--
+    Licensed to the Apache Software Foundation (ASF) under one
+    or more contributor license agreements.  See the NOTICE file
+    distributed with this work for additional information
+    regarding copyright ownership.  The ASF licenses this file
+    to you under the Apache License, Version 2.0 (the
+    "License"); you may not use this file except in compliance
+    with the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing,
+    software distributed under the License is distributed on an
+    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+    KIND, either express or implied.  See the License for the
+    specific language governing permissions and limitations
+    under the License.
+-->
+
+# Performance alerts for Beam Python performance and load tests
+
+##  Alerts
+Performance regressions or improvements detected with [Change Point Analysis](https://en.wikipedia.org/wiki/Change_detection) using the [edivisive](https://github.com/apache/beam/blob/0a91d139dea4276dc46176c4cdcdfce210fc50c4/.test-infra/jenkins/job_InferenceBenchmarkTests_Python.groovy#L30)
+analyzer are automatically filed as Beam GitHub issues with the label `perf-alert`.
+
+The GitHub issue description contains information on the affected test and metric, listing the metric values for N consecutive runs with timestamps
+before and after the observed change point. The observed change point is marked as `Anomaly` in the issue description.
+
+Example: [sample perf alert GitHub issue](https://github.com/AnandInguva/beam/issues/83).
+
+When a performance alert is created for a test, a GitHub issue is filed and its metadata, such as the GitHub issue
+URL and issue number along with the change point value and timestamp, is exported to BigQuery. This data is used to analyze the next change point observed on the same test and
+either update the already created GitHub issue or suppress the alert so that no duplicate issue is created.
+
+##  Config file structure
+The config file defines how to run change point analysis on a given test. To add a test to the config file,
+please follow the structure below.
+
+**NOTE**: Change point analysis currently supports reading metric data only from BigQuery.
+
+```
+# the test_1 must be a unique id.
+test_1:
+  test_name: apache_beam.testing.benchmarks.inference.pytorch_image_classification_benchmarks
+  source: big_query
+  metrics_dataset: beam_run_inference
+  metrics_table: torch_inference_imagenet_results_resnet152
+  project: apache-beam-testing
+  metric_name: mean_load_model_latency_milli_secs
+  labels:
+    - run-inference
+  min_runs_between_change_points: 5
+  num_runs_in_change_point_window: 7
+```
+
+**NOTE**: `test_name` should be in the format `apache_beam.foo.bar` and should point to a single test target.
+
+**Note**: If the source is **BigQuery**, the `metrics_dataset`, `metrics_table`, `project`, and `metric_name` must match the values defined for the performance/load tests.
+The above example uses this [test configuration](https://github.com/apache/beam/blob/0a91d139dea4276dc46176c4cdcdfce210fc50c4/.test-infra/jenkins/job_InferenceBenchmarkTests_Python.groovy#L30)
+to fill in the values required to fetch the data from the source.
+
+### Different ways to avoid false positive change points
+
+**min_runs_between_change_points**: As the metric data evolves across runs, change point analysis can place the
+change point in a slightly different position. These change points refer to the same regression and are just noise.
+When a new change point is found, we search up to `min_runs_between_change_points` runs in both directions from the
+current change point. If an existing change point is found within that distance, the current change point is
+suppressed.
+
+**num_runs_in_change_point_window**: This defines how many runs, counted back from the most recent run, fall in the change point window.
+Sometimes the change point found might be far back in time and therefore irrelevant. If, for a test, a change point should be
+reported only when it was observed in the last 7 runs from the current run,
+setting `num_runs_in_change_point_window=7` will achieve this.
+
+##  Register a test for performance alerts
+
+If a new test needs to be registered for the performance alerting tool, please add the required test parameters to the
+config file.
+
+## Triage performance alert issues
+
+All the performance/load test metrics defined at [beam/.test-infra/jenkins](https://github.com/apache/beam/tree/master/.test-infra/jenkins) are imported to [Grafana dashboards](http://104.154.241.245/d/1/getting-started?orgId=1) for visualization. Please 

Review Comment:
   is there a URL that is not based on an IP address?



##########
sdks/python/apache_beam/testing/analyzers/tests_config.yaml:
##########
@@ -0,0 +1,38 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+test_1:
+  test_name: apache_beam.testing.benchmarks.inference.pytorch_image_classification_benchmarks
+  metrics_dataset: beam_run_inference
+  metrics_table: torch_inference_imagenet_results_resnet152
+  project: apache-beam-testing
+  metric_name: mean_load_model_latency_milli_secs
+  labels:
+    - run-inference
+  min_runs_between_change_points: 5

Review Comment:
   you could also comment it out and/or say it's optional.
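
   if it's kept optional, the analyzer could fall back to a default when the key is missing — a rough sketch (the default constant and helper name here are hypothetical):

```python
import yaml

# Hypothetical default; in practice this would live with the analyzer's constants.
_DEFAULT_MIN_RUNS_BETWEEN_CHANGE_POINTS = 5

def load_test_config(path: str, test_id: str) -> dict:
  """Reads one test entry from the YAML config, tolerating optional keys."""
  with open(path) as f:
    config = yaml.safe_load(f)
  params = config[test_id]
  # min_runs_between_change_points is treated as optional.
  params.setdefault(
      'min_runs_between_change_points', _DEFAULT_MIN_RUNS_BETWEEN_CHANGE_POINTS)
  return params
```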



##########
sdks/python/apache_beam/testing/analyzers/perf_analysis.py:
##########
@@ -76,14 +75,17 @@ def run_change_point_analysis(params, test_id, big_query_metrics_fetcher):
   if not change_point_index:
     return
 
-  if not is_change_point_in_valid_window(num_runs_in_change_point_window,
-                                         change_point_index):
+  if not is_change_point_in_valid_window(
+      num_runs_in_change_point_window,
+      len(timestamps) - 1 - change_point_index):

Review Comment:
   make a helper variable for `len(timestamps) - 1 - change_point_index` whose name explains what this value is. I'm actually confused about what it represents: `is_change_point_in_valid_window` takes a parameter called `change_point_index`, but its meaning differs from the meaning of `change_point_index` in this code, since apparently we need to do some calculation first. So `change_point_index` refers to a different concept in two places in the code. Please fix.
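
   e.g., something along these lines inside `run_change_point_analysis` (just a sketch; the variable name is only a suggestion):

```python
# Number of runs between the change point and the most recent run
# (0 means the change point is the latest run).
runs_since_change_point = len(timestamps) - 1 - change_point_index
if not is_change_point_in_valid_window(num_runs_in_change_point_window,
                                       runs_since_change_point):
  ...  # existing handling for change points outside the window
```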



##########
sdks/python/apache_beam/testing/analyzers/perf_analysis.py:
##########
@@ -103,13 +105,14 @@ def run_change_point_analysis(params, test_id, big_query_metrics_fetcher):
         timestamps=timestamps,
         min_runs_between_change_points=min_runs_between_change_points)
 
+  # TODO: remove before merging.

Review Comment:
   alternative: use logging.debug if you think it will be helpful to keep this logging in the future.
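
   something like the sketch below, for example (the exact message is up to you; `test_id` and `change_point_index` follow the names already used in this function):

```python
import logging

# Keep the diagnostic output, but at debug level so it is hidden by default.
logging.debug(
    'Change point found for test %s at index %d.', test_id, change_point_index)
```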



##########
sdks/python/apache_beam/testing/analyzers/README.md:
##########
@@ -0,0 +1,132 @@
+<!--
+    Licensed to the Apache Software Foundation (ASF) under one
+    or more contributor license agreements.  See the NOTICE file
+    distributed with this work for additional information
+    regarding copyright ownership.  The ASF licenses this file
+    to you under the Apache License, Version 2.0 (the
+    "License"); you may not use this file except in compliance
+    with the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing,
+    software distributed under the License is distributed on an
+    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+    KIND, either express or implied.  See the License for the
+    specific language governing permissions and limitations
+    under the License.
+-->
+
+# Performance alerts for Beam Python performance and load tests
+
+##  Alerts
+Performance regressions or improvements detected with [Change Point Analysis](https://en.wikipedia.org/wiki/Change_detection) using the [edivisive](https://github.com/apache/beam/blob/0a91d139dea4276dc46176c4cdcdfce210fc50c4/.test-infra/jenkins/job_InferenceBenchmarkTests_Python.groovy#L30)
+analyzer are automatically filed as Beam GitHub issues with the label `perf-alert`.
+
+The GitHub issue description contains information on the affected test and metric, listing the metric values for N consecutive runs with timestamps
+before and after the observed change point. The observed change point is marked as `Anomaly` in the issue description.
+
+Example: [sample perf alert GitHub issue](https://github.com/AnandInguva/beam/issues/83).
+
+When a performance alert is created for a test, a GitHub issue is filed and its metadata, such as the GitHub issue
+URL and issue number along with the change point value and timestamp, is exported to BigQuery. This data is used to analyze the next change point observed on the same test and
+either update the already created GitHub issue or suppress the alert so that no duplicate issue is created.
+
+##  Config file structure
+The config file defines how to run change point analysis on a given test. To add a test to the config file,
+please follow the structure below.
+
+**NOTE**: Change point analysis currently supports reading metric data only from BigQuery.
+
+```
+# the test_1 must be a unique id.
+test_1:
+  test_name: apache_beam.testing.benchmarks.inference.pytorch_image_classification_benchmarks
+  source: big_query
+  metrics_dataset: beam_run_inference
+  metrics_table: torch_inference_imagenet_results_resnet152
+  project: apache-beam-testing
+  metric_name: mean_load_model_latency_milli_secs
+  labels:
+    - run-inference
+  min_runs_between_change_points: 5
+  num_runs_in_change_point_window: 7
+```
+
+**NOTE**: `test_name` should be in the format `apache_beam.foo.bar` and should point to a single test target.
+
+**Note**: If the source is **BigQuery**, the `metrics_dataset`, `metrics_table`, `project`, and `metric_name` must match the values defined for the performance/load tests.
+The above example uses this [test configuration](https://github.com/apache/beam/blob/0a91d139dea4276dc46176c4cdcdfce210fc50c4/.test-infra/jenkins/job_InferenceBenchmarkTests_Python.groovy#L30)
+to fill in the values required to fetch the data from the source.
+
+### Different ways to avoid false positive change points
+
+**min_runs_between_change_points**: As the metric data evolves across runs, change point analysis can place the
+change point in a slightly different position. These change points refer to the same regression and are just noise.
+When a new change point is found, we search up to `min_runs_between_change_points` runs in both directions from the
+current change point. If an existing change point is found within that distance, the current change point is
+suppressed.
+
+**num_runs_in_change_point_window**: This defines how many runs, counted back from the most recent run, fall in the change point window.
+Sometimes the change point found might be far back in time and therefore irrelevant. If, for a test, a change point should be
+reported only when it was observed in the last 7 runs from the current run,
+setting `num_runs_in_change_point_window=7` will achieve this.
+
+##  Register a test for performance alerts
+
+If a new test needs to be registered for the performance alerting tool, please add the required test parameters to the
+config file.
+
+## Triage performance alert issues
+
+All the performance/load test metrics defined at [beam/.test-infra/jenkins](https://github.com/apache/beam/tree/master/.test-infra/jenkins) are imported to [Grafana dashboards](http://104.154.241.245/d/1/getting-started?orgId=1) for visualization. Please 
+find the dashboard of the alerted test and look for a spike in the metric values.
+
+For example, for the configuration below,
+* test: `apache_beam.testing.benchmarks.inference.pytorch_image_classification_benchmarks`
+* metric_name: `mean_load_model_latency_milli_secs`
+
+the Grafana dashboard can be found at http://104.154.241.245/d/ZpS8Uf44z/python-ml-runinference-benchmarks?orgId=1&viewPanel=7
+
+If the dashboard for a test is not found, you can use the
+code below to generate a plot for the given test and metric_name.
+
+If the performance/load test stores its results in BigQuery using this [schema](https://github.com/apache/beam/blob/83679216cce2d52dbeb7e837f06ca1d57b31d509/sdks/python/apache_beam/testing/load_tests/load_test_metrics_utils.py#L66),
+then use the following code to fetch the metric values for a `metric_name` from the last `30` runs and display a plot using matplotlib.
+
+**NOTE**: Install matplotlib and pandas using `pip install matplotlib pandas`.

Review Comment:
   nice, thanks. this would be better as a notebook in the repo that people could refer to and update as necessary.
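
   until a notebook exists, a rough sketch of what such a snippet could look like is below. The table/dataset values mirror the example config earlier in this README, the query uses `pandas-gbq` (`pip install pandas-gbq`), and the column names (`metric`, `value`, `timestamp`) are assumptions based on the linked schema — double-check them against the actual table.

```python
import pandas as pd
import matplotlib.pyplot as plt

# Values below mirror the example config in this README; adjust as needed.
project = 'apache-beam-testing'
metrics_dataset = 'beam_run_inference'
metrics_table = 'torch_inference_imagenet_results_resnet152'
metric_name = 'mean_load_model_latency_milli_secs'

query = f"""
  SELECT value, timestamp
  FROM `{project}.{metrics_dataset}.{metrics_table}`
  WHERE metric = '{metric_name}'
  ORDER BY timestamp DESC
  LIMIT 30
"""

# Requires pandas-gbq; prompts for GCP credentials if not already configured.
df = pd.read_gbq(query, project_id=project).sort_values(by='timestamp')

plt.plot(df['timestamp'], df['value'], marker='o')
plt.xlabel('run timestamp')
plt.ylabel(metric_name)
plt.xticks(rotation=45)
plt.tight_layout()
plt.show()
```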



##########
sdks/python/apache_beam/testing/analyzers/github_issues_utils.py:
##########
@@ -151,18 +157,19 @@ def get_issue_description(
   """
 
   # TODO: Add mean and median before and after the changepoint index.
-  upper_bound = min(
-      change_point_index + max_results_to_display + 1, len(metric_values))
-  lower_bound = max(0, change_point_index - max_results_to_display)
+  max_timestamp_index = min(
+      change_point_index + max_results_to_display, len(metric_values) - 1)
+  min_timestamp_index = max(0, change_point_index - max_results_to_display)
 
-  description = _ISSUE_DESCRIPTION_HEADER.format(metric_name) + 2 * '\n'
+  description = _ISSUE_DESCRIPTION_TEMPLATE.format(
+      test_name, metric_name) + 2 * '\n'
 
   runs_to_display = [
-      _METRIC_INFO.format(timestamps[i].ctime(), metric_values[i])
-      for i in range(lower_bound, upper_bound)
+      _METRIC_INFO_TEMPLATE.format(timestamps[i].ctime(), metric_values[i])
+      for i in range(max_timestamp_index, min_timestamp_index - 1, -1)

Review Comment:
   jfyi, another option is `for i in reversed(range(min, max + 1))`. 
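
   i.e., roughly (same names as in the diff above):

```python
runs_to_display = [
    _METRIC_INFO_TEMPLATE.format(timestamps[i].ctime(), metric_values[i])
    # Walk from the newest run in the window down to the oldest one.
    for i in reversed(range(min_timestamp_index, max_timestamp_index + 1))
]
```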



##########
sdks/python/apache_beam/testing/analyzers/perf_analysis_utils.py:
##########
@@ -58,8 +55,6 @@ class GitHubIssueMetaData:
 
 def is_change_point_in_valid_window(
     num_runs_in_change_point_window: int, change_point_index: int) -> bool:
-  # If the change point is more than N runs behind the most recent run,
-  # Ignore the change point and don't raise an alert for it.
   return num_runs_in_change_point_window >= change_point_index

Review Comment:
   an index typically ranges from 0 to (# of entries - 1); should it therefore be `return num_runs_in_change_point_window > change_point_index`?
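
   a quick way to check the boundary (assuming the second argument is the number of runs between the change point and the latest run, as in the call-site change above):

```python
num_runs_in_change_point_window = 7
# distance = number of runs between the change point and the most recent run
# (0 means the change point is the latest run).
for distance in (0, 6, 7):
  print(distance,
        num_runs_in_change_point_window >= distance,  # current check
        num_runs_in_change_point_window > distance)   # suggested check
# distance == 7 passes with `>=` but not with `>`, so only `>` restricts
# alerts to the last 7 runs exactly.
```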



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]
