[ 
https://issues.apache.org/jira/browse/BEAM-4065?focusedWorklogId=104682&page=com.atlassian.jira.plugin.system.issuetabpanels:worklog-tabpanel#worklog-104682
 ]

ASF GitHub Bot logged work on BEAM-4065:
----------------------------------------

                Author: ASF GitHub Bot
            Created on: 22/May/18 16:59
            Start Date: 22/May/18 16:59
    Worklog Time Spent: 10m 
      Work Description: chamikaramj closed pull request #5180: [BEAM-4065] 
Basic performance tests analysis added.
URL: https://github.com/apache/beam/pull/5180
 
 
   

This is a PR merged from a forked repository.
As GitHub hides the original diff on merge, it is displayed below for
the sake of provenance:

As this is a foreign pull request (from a fork), the diff is supplied
below (as it won't show otherwise due to GitHub magic):

diff --git a/.test-infra/jenkins/common_job_properties.groovy 
b/.test-infra/jenkins/common_job_properties.groovy
index f07ac59e08d..79c9d560ea7 100644
--- a/.test-infra/jenkins/common_job_properties.groovy
+++ b/.test-infra/jenkins/common_job_properties.groovy
@@ -117,6 +117,7 @@ class common_job_properties {
       }
       credentialsBinding {
         string("COVERALLS_REPO_TOKEN", "beam-coveralls-token")
+        string("SLACK_WEBHOOK_URL", "beam-slack-webhook-url")
       }
     }
   }
diff --git a/.test-infra/jenkins/job_beam_PerformanceTests_Analysis.groovy 
b/.test-infra/jenkins/job_beam_PerformanceTests_Analysis.groovy
new file mode 100644
index 00000000000..c23743fb840
--- /dev/null
+++ b/.test-infra/jenkins/job_beam_PerformanceTests_Analysis.groovy
@@ -0,0 +1,85 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import common_job_properties
+
+def testConfiguration = [
+                jobName           : 'beam_PerformanceTests_Analysis',
+                jobDescription    : 'Runs python script that is verifying 
results in bq',
+                prCommitStatusName: 'Performance Tests Analysis',
+                prTriggerPhase    : 'Run Performance Tests Analysis',
+                bqTables: [
+                        "beam_performance.textioit_pkb_results",
+                        "beam_performance.compressed_textioit_pkb_results",
+                        "beam_performance.avroioit_pkb_results",
+                        "beam_performance.tfrecordioit_pkb_results",
+                        "beam_performance.xmlioit_pkb_results",
+                        "beam_performance.textioit_hdfs_pkb_results",
+                        
"beam_performance.compressed_textioit_hdfs_pkb_results",
+                        "beam_performance.avroioit_hdfs_pkb_results",
+                        "beam_performance.xmlioit_hdfs_pkb_results",
+                        "beam_performance.hadoopinputformatioit_pkb_results",
+                        "beam_performance.mongodbioit_pkb_results",
+                        "beam_performance.jdbcioit_pkb_results"
+                ]
+        ]
+
+// This job runs the performance tests analysis job and produces daily report.
+job(testConfiguration.jobName) {
+    description(testConfiguration.jobDescription)
+
+    // Set default Beam job properties.
+    common_job_properties.setTopLevelMainJobProperties(delegate)
+
+    // Allows triggering this build against pull requests.
+    common_job_properties.enablePhraseTriggeringFromPullRequest(
+            delegate,
+            testConfiguration.prCommitStatusName,
+            testConfiguration.prTriggerPhase)
+
+    // Run job in postcommit every 24 hours, don't trigger every push, and
+    // don't email individual committers.
+    common_job_properties.setPostCommit(
+            delegate,
+            '30 */24 * * *',
+            false,
+            '[email protected]',
+            false)
+
+
+    steps {
+        // Clean up environment after other python using tools.
+        shell('rm -rf PerfKitBenchmarker')
+        shell('rm -rf .env')
+
+        // create new VirtualEnv, inherit already existing packages
+        shell('virtualenv .env --system-site-packages')
+
+        // update setuptools and pip
+        shell('.env/bin/pip install --upgrade setuptools pip')
+
+        // Install job requirements for analysis script.
+        shell('.env/bin/pip install requests google.cloud.bigquery mock')
+
+        // Launch verification tests before executing script.
+        shell('.env/bin/python ' + common_job_properties.checkoutDir + 
'/.test-infra/jenkins/verify_performance_test_results_test.py')
+
+        // Launch performance tests analysis.
+        shell('.env/bin/python ' + common_job_properties.checkoutDir + 
'/.test-infra/jenkins/verify_performance_test_results.py --bqtable \"'+ 
testConfiguration.bqTables + '\" ' + '--metric=\"run_time\" ' + '--mode=report 
--send_notification')
+    }
+}
\ No newline at end of file
diff --git a/.test-infra/jenkins/verify_performance_test_results.py 
b/.test-infra/jenkins/verify_performance_test_results.py
new file mode 100644
index 00000000000..cf3be579f73
--- /dev/null
+++ b/.test-infra/jenkins/verify_performance_test_results.py
@@ -0,0 +1,324 @@
+#!/usr/bin/env python
+#
+#
+#    Licensed to the Apache Software Foundation (ASF) under one or more
+#    contributor license agreements.  See the NOTICE file distributed with
+#    this work for additional information regarding copyright ownership.
+#    The ASF licenses this file to You under the Apache License, Version 2.0
+#    (the "License"); you may not use this file except in compliance with
+#    the License.  You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS,
+#    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#    See the License for the specific language governing permissions and
+#    limitations under the License.
+#
+#   This script performs basic analysis of performance test results.
+#   It operates in two modes:
+#   --mode=report - In this mode script iterates over list of BigQuery tables 
and
+#   analyses the data. This mode is intended to be run on a regular basis, 
e.g. daily.
+#   Report will contain average tests execution time of given metric, its 
comparison 
+#   with average calculated from historical data, recent standard deviation 
and standard
+#   deviation calculated based on historical data.
+#   --mode=validation - In this mode script will analyse single BigQuery table 
and check
+#   recent results.
+#
+#   Other parameters are described in script. Notification is optional 
parameter.
+#   --send_notification - if present, script will send notification to slack 
channel.
+#   Requires setting env variable SLACK_WEBHOOK_URL whose value could be 
obtained by
+#   creating incoming webhook on Slack.
+#
+#   This script is intended to be used only by Jenkins.
+#   Example script usage:
+#   verify_performance_test_results.py \
+#     --bqtable='["beam_performance.avroioit_hdfs_pkb_results", \
+#                 "beam_performance.textioit_pkb_results"]' \
+#     --metric="run_time" --mode=report --send_notification
+#
+
+import argparse, time, calendar, json, re, os, requests
+from google.cloud import bigquery
+
+### TIME SETTINGS ###########
+TIME_PATTERN = '%d-%m-%Y_%H-%M-%S'
+NOW = int(time.time())
+# First analysis time interval definition - 24h before
+TIME_POINT_1 = NOW - 1 * 86400
+# Second analysis time interval definition - week before
+TIME_POINT_2 = NOW - 7 * 86400
+##############################
+
+SLACK_USER = os.getenv('SLACK_USER', "jenkins-beam")
+SLACK_WEBHOOK_URL = os.getenv('SLACK_WEBHOOK_URL')
+SLACK_CHANNEL = os.getenv('SLACK_CHANNEL', "beam-testing")
+
+def submit_big_query_job(sql_command, return_type):
+    """ Submits job to BigQuery.
+
+    :param sql_command: string that contains SQL command that will be run on 
BigQuery table.
+    :param return_type: type of the result expected after executing SQL 
command.
+    :return: depending on return type, this could be a single value or None
+    """
+    query_job = client.query(sql_command)
+    results = query_job.result()
+    if return_type == "value":
+        # All queries must have single element in output displayed as 
query_result
+        for row in results:
+            return row.query_result
+    else:
+        print("This type is not supported")
+        return None
+
+def count_queries(table_name, time_start, time_stop, metric):
+    """ Counts how many rows of data were inserted in given time interval for 
given metric name
+
+    :param table_name: BigQuery table name that query will be run over.
+    :param time_start: timestamp value representing the starting point in 
further history.
+    :param time_stop: timestamp representing the ending point in closer 
history/present.
+    :param metric: string with metric name that will be checked.
+    :return: number of rows.
+    """
+    sql_command = 'select count(*) as query_result from {} where TIMESTAMP > 
{} and TIMESTAMP < {} and METRIC=\'{}\''.format(
+        table_name,
+        time_start,
+        time_stop,
+        metric
+        )
+    count = submit_big_query_job(sql_command, "value")
+    print("Number of records inserted into {} between {} - {}: {}".format(
+        table_name,
+        time.strftime(TIME_PATTERN, time.gmtime(time_start)),
+        time.strftime(TIME_PATTERN, time.gmtime(time_stop)),
+        count))
+    return count
+
+def get_average_from(table_name, time_start, time_stop, metric):
+    """ Calculates the average value of given metric in a period of time.
+
+    :param table_name: BigQuery table name that query will be run over.
+    :param time_start: timestamp value representing the starting point in 
further history.
+    :param time_stop: timestamp representing the ending point in closer 
history/present.
+    :param metric: string with metric name that will be checked.
+    :return: calculated average value.
+    """
+    sql_command = 'select avg(value) as query_result from {} where TIMESTAMP > 
{} and TIMESTAMP < {} and METRIC=\'{}\''.format(
+        table_name,
+        time_start,
+        time_stop,
+        metric
+    )
+    average = submit_big_query_job(sql_command, "value")
+    return average
+
+def get_stddev_from(table_name, time_start, time_stop, metric):
+    """ Calculates the standard deviation of given metric in a period of time.
+
+    :param table_name: BigQuery table name that query will be run over.
+    :param time_start: timestamp value representing the starting point in 
further history.
+    :param time_stop: timestamp representing the ending point in closer 
history/present.
+    :param metric: string with metric name that will be checked.
+    :return: calculated standard deviation value.
+    """
+    sql_command = 'select stddev(value) as query_result from {} where 
TIMESTAMP > {} and TIMESTAMP < {} and METRIC=\'{}\''.format(
+        table_name,
+        time_start,
+        time_stop,
+        metric
+    )
+    stddev = submit_big_query_job(sql_command, "value")
+    return stddev
+
+def calculate_historical_data(table_name, time_start, time_stop, metric, 
nb_older_records, average_recent):
+    """ Calculates percentage increase/decrease of average. Recent average is 
given as parameter
+    and historical average is calculated in given period of time. Additionally 
historical standard
+    deviation is calculated.
+
+    :param table_name: BigQuery table name that query will be run over.
+    :param time_start: timestamp value representing the starting point in 
further history.
+    :param time_stop: timestamp representing the ending point in closer 
history/present.
+    :param metric: string with metric name that will be checked.
+    :param nb_older_records: number of rows with historical data.
+    :param average_recent: average value from recent data.
+    :returns: percentage increase and historical standard deviation.
+    """
+    average_old = get_average_from(table_name, time_start, time_stop, metric)
+    if nb_older_records > 1:
+        stddev_old = get_stddev_from(table_name, time_start, time_stop, metric)
+    else:
+        # Standard deviation is 0 when there is only single value.
+        stddev_old = 0
+    percentage_increase = 100*(average_recent - average_old)/average_old
+    return percentage_increase, stddev_old
+
+def update_messages(table_name, metric, average_recent, percentage_increase, 
stddev_recent, stddev_old,
+                    report_message, slack_report_message):
+    """ This function updates messages for given table.
+
+    :param table_name: BigQuery table name that was analysed.
+    :param metric: string with metric name.
+    :param average_recent: calculated average from recent data.
+    :param percentage_increase: calculated percentage increase based on recent 
and historical data.
+    :param stddev_recent: calculated standard deviation from recent data.
+    :param stddev_old: calculated standard deviation based on historical data.
+    :param report_message: message to be updated. This message will be printed 
out in Jenkins.
+    :param slack_report_message: message to be updated. This message will be 
sent on Slack.
+    :returns: updated messages.
+    """
+
+    if percentage_increase > 10:
+        msg_app = '+++'
+    elif 5 < percentage_increase <= 10:
+        msg_app = '++'
+    elif 0 < percentage_increase <= 5:
+        msg_app = '+'
+    elif -5 < percentage_increase < 0:
+        msg_app = '-'
+    elif -10 < percentage_increase <= -5:
+        msg_app = '--'
+    elif percentage_increase < -10:
+        msg_app = '---'
+    else:
+        msg_app = ''
+
+    report_message = report_message + '{} - {}, avg_time {:.2f}s, change 
{:+.3f}%, stddev {:.2f}, stddev_old {:.2f} # => {} \n'.format(
+        table_name, metric, average_recent, percentage_increase, 
stddev_recent, stddev_old, msg_app)
+    slack_report_message = slack_report_message + format_slack_message(
+        table_name, metric, average_recent, percentage_increase, 
stddev_recent, stddev_old)
+    return report_message, slack_report_message
+
+def create_report(bq_tables, metric, send_notification):
+    """ Creates daily report. This method takes a list of BigQuery tables. For 
each table
+    averages, standard deviation and percentage increase is calculated, those 
values are
+    used to update report messages and provide visible notification about 
changes of given metric.
+    Report is printed out once all tables are processed.
+
+    :param bq_tables: list of BigQuery tables to be included in generated 
report.
+    :param metric: string with metric name.
+    :param send_notification: boolean, if set to True report will be sent to 
Slack.
+    :return: message containing generated report.
+    """
+    report_message = ''
+    slack_report_message = ''
+
+    for bq_table in bq_tables:
+        # Get raw table name
+        bq_table_name = re.sub(r'\"|\[|\]', '', bq_table).strip()
+
+        # Make sure there were data records inserted
+        nb_recent_records = count_queries(bq_table_name, TIME_POINT_1, NOW, 
metric)
+        if nb_recent_records == 0:
+            report_message = report_message + '{} there were no test results 
uploaded in recent 24h. \n'.format(bq_table_name)
+            slack_report_message = slack_report_message + '`{}` there were no 
test results uploaded in recent 24h. :bangbang:\n'.format(bq_table_name)
+        else:
+            average_recent = get_average_from(bq_table_name, TIME_POINT_1, 
NOW, metric)
+            if nb_recent_records > 1:
+                stddev_recent = get_stddev_from(bq_table_name, TIME_POINT_1, 
NOW, metric)
+            else:
+                # Standard deviation is 0 when there is only single value.
+                stddev_recent = 0
+            nb_older_records = count_queries(bq_table_name, TIME_POINT_2, 
TIME_POINT_1, metric)
+            if nb_older_records > 0:
+                percentage_change, stddev_old = \
+                    calculate_historical_data(bq_table_name, TIME_POINT_2, 
TIME_POINT_1, metric, nb_older_records, average_recent)
+                report_message, slack_report_message = \
+                    update_messages(bq_table_name, metric, average_recent, 
percentage_change, stddev_recent, stddev_old, report_message, 
slack_report_message)
+    print(report_message)
+    if send_notification:
+        notify_on_slack(slack_report_message)
+    return report_message
+
+def validate_single_performance_test(bq_tables, metric, send_notification):
+    # This function validates single test, runs after tests execution
+    report_message = ''
+    slack_report_message = ''
+
+    for bq_table in bq_tables:
+        # Get raw table name
+        bq_table_name = re.sub(r'\"|\[|\]', '', bq_table).strip()
+
+        nb_recent_records = count_queries(bq_table_name, TIME_POINT_1, NOW, 
metric)
+        average_recent = get_average_from(bq_table_name, TIME_POINT_1, NOW, 
metric)
+        if nb_recent_records > 1:
+            stddev_recent = get_stddev_from(bq_table_name, TIME_POINT_1, NOW, 
metric)
+        else:
+            stddev_recent = 0
+        nb_older_records = count_queries(bq_table_name, TIME_POINT_2, 
TIME_POINT_1, metric)
+        if nb_older_records > 0:
+            percentage_change, stddev_old = \
+                calculate_historical_data(bq_table_name, TIME_POINT_2, 
TIME_POINT_1, metric, nb_older_records, average_recent)
+            report_message, slack_report_message = \
+                update_messages(bq_table_name, metric, average_recent, 
percentage_change, stddev_recent, stddev_old, report_message, 
slack_report_message)
+    print(report_message)
+    if send_notification:
+        notify_on_slack(slack_report_message)
+    return report_message
+
+def format_slack_message(table_name, metric, avg_time, increase, stddev, 
stddev_old):
+    """ Formats slack message using slack emojis.
+
+    :param table_name: BigQuery table name that was analysed.
+    :param metric: string with metric name.
+    :param avg_time: calculated average time for recent data of given metric.
+    :param increase: percentage increase.
+    :param stddev: calculated standard deviation based on historical data.
+    :param stddev_old: calculated standard deviation based on historical data.
+    :return: formatted message.
+    """
+    table_name_formatted = '`{}`'.format(table_name)
+    # Depending on change value bold or highlight and add emoji
+    if increase is None:
+        increase_formatted = "NA"
+    elif increase >= 0:
+        increase_formatted = '`{:+.3f}%` :-1:'.format(increase)
+    else:
+        increase_formatted = '*{:+.3f}%* :+1:'.format(increase)
+
+    # Bold if value available
+    stddev_formatted = '*{:.2f}*'.format(stddev)
+    stddev_old_formatted = '*{:.2f}*'.format(stddev_old)
+
+    return '{} - \"{}\", avg_time {:.2f}s, change {}, stddev {}, stddev_old {} 
\n'.format(
+        table_name_formatted, metric, avg_time, increase_formatted, 
stddev_formatted, stddev_old_formatted)
+
+def notify_on_slack(message):
+    """ This function sends message to Slack as SLACK_USER to channel defined 
in SLACK_CHANNEL
+    variable using token in SLACK_WEBHOOK_URL variable.
+
+    :param message: message to be sent on Slack.
+    :return: None
+    """
+
+    data = {
+        'text': message,
+        'username': SLACK_USER,
+        'icon_emoji': ':robot_face:',
+        'channel': SLACK_CHANNEL
+    }
+    response = requests.post(SLACK_WEBHOOK_URL, data=json.dumps(
+        data), headers={'Content-Type': 'application/json'})
+    print('Response: ' + str(response.text))
+    print('Response code: ' + str(response.status_code))
+
+if __name__ == '__main__':
+
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--bqtable", help="List of tables you want to check.")
+    parser.add_argument("--metric", help="Metric name you want to validate.")
+    parser.add_argument("--mode", help="Script mode: report/validate", 
default="validate")
+    parser.add_argument("--send_notification", help="Send slack 
notification.", action='store_true')
+
+    args = parser.parse_args()
+    client = bigquery.Client()
+    bq_tables = args.bqtable.split(",")
+    metric = args.metric.strip()
+    send_notification = args.send_notification
+
+    if args.mode == "report":
+        create_report(bq_tables, metric, send_notification)
+    elif args.mode == "validate":
+        validate_single_performance_test(bq_tables, metric, send_notification)
+    else:
+        print("This mode is not supported yet.")
diff --git a/.test-infra/jenkins/verify_performance_test_results_test.py 
b/.test-infra/jenkins/verify_performance_test_results_test.py
new file mode 100644
index 00000000000..22f6f3b52fe
--- /dev/null
+++ b/.test-infra/jenkins/verify_performance_test_results_test.py
@@ -0,0 +1,89 @@
+#!/usr/bin/env python
+#
+#
+#    Licensed to the Apache Software Foundation (ASF) under one or more
+#    contributor license agreements.  See the NOTICE file distributed with
+#    this work for additional information regarding copyright ownership.
+#    The ASF licenses this file to You under the Apache License, Version 2.0
+#    (the "License"); you may not use this file except in compliance with
+#    the License.  You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS,
+#    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#    See the License for the specific language governing permissions and
+#    limitations under the License.
+#
+#   This script performs testing of scenarios from 
verify_performance_test_results.py
+#
+
+import unittest, mock
+from mock import patch
+from verify_performance_test_results import create_report
+
+class VerifyAnalysisScript(unittest.TestCase):
+    """Tests for `verify_performance_test_results.py`."""
+
+    def setUp(self):
+        print "Test name:", self._testMethodName
+
+    @patch('verify_performance_test_results.count_queries', return_value=0)
+    def test_create_daily_report_when_no_data_was_uploaded(self, *args):
+        """Testing creating report when no data was uploaded. Expected: Error 
message"""
+        output_message = create_report(["test_bq_table"], "test", False)
+        assert "no test results uploaded in recent 24h." in output_message
+
+    @patch('verify_performance_test_results.count_queries', return_value=1)
+    @patch('verify_performance_test_results.get_average_from', return_value=10)
+    @patch('verify_performance_test_results.get_stddev_from', return_value=10)
+    def test_create_daily_report_when_single_entry_was_uploaded(self, *args):
+        """Testing stddev value when single data entry was uploaded. Expected: 
0"""
+        output_message = create_report(["test_bq_table"], "test", False)
+        assert ", stddev 0.00" in output_message
+
+    @patch('verify_performance_test_results.count_queries', side_effect=[1, 0])
+    @patch('verify_performance_test_results.get_average_from', return_value=10)
+    @patch('verify_performance_test_results.get_stddev_from', return_value=10)
+    def test_create_daily_report_when_no_historical_data_was_uploaded(self, 
*args):
+        """Testing output when no historical data is available. Expected: no 
message."""
+        output_message = create_report(["test_bq_table"], "test", False)
+        self.assertEqual(output_message, "")
+
+    @patch('verify_performance_test_results.count_queries', side_effect=[5, 5])
+    @patch('verify_performance_test_results.get_average_from', 
side_effect=[200, 100])
+    @patch('verify_performance_test_results.get_stddev_from', return_value=10)
+    def test_create_daily_report_when_average_time_increases_twice(self, 
*args):
+        """Testing output when average time increases twice. Expected: 100% 
increase"""
+        output_message = create_report(["test_bq_table"], "test", False)
+        assert ", change +100.000%" in output_message
+
+    @patch('verify_performance_test_results.count_queries', side_effect=[5, 5])
+    @patch('verify_performance_test_results.get_average_from', 
side_effect=[200, 100])
+    @patch('verify_performance_test_results.get_stddev_from', return_value=10)
+    def test_create_daily_report_when_average_time_increases(self, *args):
+        """Testing output when average time increases. Expected: positive 
change"""
+        output_message = create_report(["test_bq_table"], "test", False)
+        assert ", change +" in output_message
+
+    @patch('verify_performance_test_results.count_queries', side_effect=[5, 5])
+    @patch('verify_performance_test_results.get_average_from', 
side_effect=[100, 200])
+    @patch('verify_performance_test_results.get_stddev_from', return_value=10)
+    def test_create_daily_report_when_average_time_decreases(self, *args):
+        """Testing output when average time decreases. Expected: negative 
change"""
+        output_message = create_report(["test_bq_table"], "test", False)
+        assert ", change -" in output_message
+
+    @patch('verify_performance_test_results.count_queries', side_effect=[5, 5])
+    @patch('verify_performance_test_results.get_average_from', 
side_effect=[100, 100])
+    @patch('verify_performance_test_results.get_stddev_from', return_value=10)
+    def test_create_daily_report_when_average_time_does_not_change(self, 
*args):
+        """Testing output when average time does not change. Expected: zero change"""
+        output_message = create_report(["test_bq_table"], "test", False)
+        assert ", change +0.000%" in output_message
+
+    #TODO: Add more testing scenarios, when single performance tests will be 
finished.
+
+if __name__ == '__main__':
+    unittest.main()
\ No newline at end of file


 

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
[email protected]


Issue Time Tracking
-------------------

    Worklog Id:     (was: 104682)
    Time Spent: 11h 50m  (was: 11h 40m)

> Performance Tests Results Analysis and Regression Detection
> -----------------------------------------------------------
>
>                 Key: BEAM-4065
>                 URL: https://issues.apache.org/jira/browse/BEAM-4065
>             Project: Beam
>          Issue Type: Improvement
>          Components: build-system
>            Reporter: Kamil Szewczyk
>            Assignee: Kamil Szewczyk
>            Priority: Major
>          Time Spent: 11h 50m
>  Remaining Estimate: 0h
>
> Performance tests are running on Jenkins on regular basis and results are 
> pushed to BigQuery. However there is no automatic regression detection or 
> daily reports with results.



--
This message was sent by Atlassian JIRA
(v7.6.3#76005)

Reply via email to