bin/crashreportScraper.py | 13 ------------- 1 file changed, 13 deletions(-)
New commits: commit 40bab7e27f13becb45055c9cfcd52aebf6128536 Author: Xisco Fauli <[email protected]> AuthorDate: Mon Feb 12 11:11:17 2024 +0100 Commit: Xisco Fauli <[email protected]> CommitDate: Tue Feb 13 10:33:42 2024 +0100 crashreportScraper: no need to go to the last page anymore. JJ changed the way the reports are sorted to show the most recent ones on page 1. Change-Id: I59d566ff45fd8a75263b70a1e436e4263422e93b Reviewed-on: https://gerrit.libreoffice.org/c/core/+/163239 Tested-by: Jenkins Reviewed-by: Xisco Fauli <[email protected]> diff --git a/bin/crashreportScraper.py b/bin/crashreportScraper.py index 876570d3a028..78b2f6cb5eb2 100755 --- a/bin/crashreportScraper.py +++ b/bin/crashreportScraper.py @@ -71,19 +71,6 @@ def parse_reports_and_get_most_recent_report_from_last_page(url): td_list = tr.find_all("td") count += int(td_list[1].text.strip()) - # There are 50 reports on each page. - # Go to the last page based on the total count to get a recent report - last_page = math.ceil( count / 50 ) - - if last_page > 1: - url = url + "?page=" + str(last_page) - try: - html_text = requests.get(url, timeout=200).text - soup = BeautifulSoup(html_text, 'html.parser') - except requests.exceptions.Timeout: - print("Timeout") - raise - reports = soup.find("div", {"id": "reports"}).tbody ID, currentID = "", "" version, currentVersion = "", ""
