This is an automated email from the ASF dual-hosted git repository.

jli pushed a commit to branch alert-report-test-gaps
in repository https://gitbox.apache.org/repos/asf/superset.git

commit 752a22bf9c09c9da42bcc3952776cd0317459fd5
Author: Joe Li <[email protected]>
AuthorDate: Wed Feb 18 16:30:18 2026 -0800

    test(reports): add Phase 1 + 1b alert/report unit tests
    
    Cover untested filter type branches, schema boundary validation,
    and crash-behavior regression locks for native filter params.
    
    model_test.py (13 new tests):
    - filter_select: null column, empty filter ID fallback
    - filter_time: normal value, empty values IndexError regression
    - filter_timegrain: normal value
    - filter_timecolumn: normal value (verifies missing 'id' key)
    - filter_range: [min,max], min-only, max-only, empty []
    - Unknown filter type returns {}
    - get_native_filters_params: null nativeFilters, rison quote
      escaping, missing nativeFilterId KeyError regression
    
    schemas_test.py (13 new tests):
    - custom_width boundaries (599/600/2400/2401/None) for both
      POST and PUT schemas
    - working_timeout POST min=1 rejection + PUT allow_none
    - log_retention POST min=1 vs PUT min=0 parity
    - Report type disallows database field
    
    Co-Authored-By: Claude Opus 4.6 <[email protected]>
---
 tests/unit_tests/reports/model_test.py   | 173 +++++++++++++++++++++++++++++++
 tests/unit_tests/reports/schemas_test.py |  99 +++++++++++++++++-
 2 files changed, 271 insertions(+), 1 deletion(-)

diff --git a/tests/unit_tests/reports/model_test.py b/tests/unit_tests/reports/model_test.py
index 19e12e14072..8f5ec851482 100644
--- a/tests/unit_tests/reports/model_test.py
+++ b/tests/unit_tests/reports/model_test.py
@@ -240,3 +240,176 @@ def test_report_generate_native_filter_no_column_name():
             "ownState": {},
         }
     }
+
+
+def test_report_generate_native_filter_select_null_column():
+    report_schedule = ReportSchedule()
+    result = report_schedule._generate_native_filter(
+        "F1", "filter_select", None, ["US"]
+    )
+    assert result["F1"]["extraFormData"]["filters"][0]["col"] == ""
+    assert result["F1"]["filterState"]["label"] == ""
+
+
+def test_generate_native_filter_time_normal():
+    report_schedule = ReportSchedule()
+    result = report_schedule._generate_native_filter(
+        "F2", "filter_time", "ignored", ["Last week"]
+    )
+    assert result == {
+        "F2": {
+            "id": "F2",
+            "extraFormData": {"time_range": "Last week"},
+            "filterState": {"value": "Last week"},
+            "ownState": {},
+        }
+    }
+
+
+def test_generate_native_filter_time_empty_values():
+    # Regression: locks crash behavior until upstream fix (see PROJECT.md)
+    report_schedule = ReportSchedule()
+    with pytest.raises(IndexError):
+        report_schedule._generate_native_filter("F2", "filter_time", "ignored", [])
+
+
+def test_generate_native_filter_timegrain_normal():
+    report_schedule = ReportSchedule()
+    result = report_schedule._generate_native_filter(
+        "F3", "filter_timegrain", "ignored", ["P1D"]
+    )
+    assert result == {
+        "F3": {
+            "id": "F3",
+            "extraFormData": {"time_grain_sqla": "P1D"},
+            "filterState": {"value": ["P1D"]},
+            "ownState": {},
+        }
+    }
+
+
+def test_generate_native_filter_timecolumn_normal():
+    """filter_timecolumn is the only branch missing 'id' in its output."""
+    report_schedule = ReportSchedule()
+    result = report_schedule._generate_native_filter(
+        "F4", "filter_timecolumn", "ignored", ["ds"]
+    )
+    assert result == {
+        "F4": {
+            "extraFormData": {"granularity_sqla": "ds"},
+            "filterState": {"value": ["ds"]},
+        }
+    }
+    assert "id" not in result["F4"]
+
+
+def test_generate_native_filter_range_normal():
+    report_schedule = ReportSchedule()
+    result = report_schedule._generate_native_filter(
+        "F5", "filter_range", "price", [10, 100]
+    )
+    assert result == {
+        "F5": {
+            "id": "F5",
+            "extraFormData": {
+                "filters": [
+                    {"col": "price", "op": ">=", "val": 10},
+                    {"col": "price", "op": "<=", "val": 100},
+                ]
+            },
+            "filterState": {
+                "value": [10, 100],
+                "label": "10 ≤ x ≤ 100",
+            },
+            "ownState": {},
+        }
+    }
+
+
+def test_generate_native_filter_range_min_only():
+    report_schedule = ReportSchedule()
+    result = report_schedule._generate_native_filter(
+        "F5", "filter_range", "price", [10]
+    )
+    assert result["F5"]["extraFormData"]["filters"] == [
+        {"col": "price", "op": ">=", "val": 10}
+    ]
+    assert result["F5"]["filterState"]["label"] == "x ≥ 10"
+    assert result["F5"]["filterState"]["value"] == [10, None]
+
+
+def test_generate_native_filter_range_max_only():
+    report_schedule = ReportSchedule()
+    result = report_schedule._generate_native_filter(
+        "F5", "filter_range", "price", [None, 100]
+    )
+    assert result["F5"]["extraFormData"]["filters"] == [
+        {"col": "price", "op": "<=", "val": 100}
+    ]
+    assert result["F5"]["filterState"]["label"] == "x ≤ 100"
+
+
+def test_generate_native_filter_range_empty_values():
+    report_schedule = ReportSchedule()
+    result = report_schedule._generate_native_filter("F5", "filter_range", "price", [])
+    assert result["F5"]["extraFormData"]["filters"] == []
+    assert result["F5"]["filterState"]["label"] == ""
+    assert result["F5"]["filterState"]["value"] == [None, None]
+
+
+def test_generate_native_filter_unknown_type():
+    report_schedule = ReportSchedule()
+    result = report_schedule._generate_native_filter("F6", "unknown_type", "col", ["x"])
+    assert result == {}
+
+
+def test_get_native_filters_params_null_native_filters():
+    report_schedule = ReportSchedule()
+    report_schedule.extra = {"dashboard": {"nativeFilters": None}}
+    assert report_schedule.get_native_filters_params() == "()"
+
+
+def test_get_native_filters_params_rison_quote_escaping():
+    report_schedule = ReportSchedule()
+    report_schedule.extra = {
+        "dashboard": {
+            "nativeFilters": [
+                {
+                    "nativeFilterId": "F1",
+                    "filterType": "filter_select",
+                    "columnName": "name",
+                    "filterValues": ["O'Brien"],
+                }
+            ]
+        }
+    }
+    result = report_schedule.get_native_filters_params()
+    assert "'" not in result
+    assert "%27" in result
+
+
+def test_get_native_filters_params_missing_filter_id_key():
+    # Regression: locks crash behavior until upstream fix (see PROJECT.md)
+    report_schedule = ReportSchedule()
+    report_schedule.extra = {
+        "dashboard": {
+            "nativeFilters": [
+                {
+                    "filterType": "filter_select",
+                    "columnName": "col",
+                    "filterValues": ["v"],
+                    # Missing "nativeFilterId" key
+                }
+            ]
+        }
+    }
+    with pytest.raises(KeyError, match="nativeFilterId"):
+        report_schedule.get_native_filters_params()
+
+
+def test_generate_native_filter_empty_filter_id():
+    """Empty native_filter_id triggers the ``or ""`` fallback branches."""
+    report_schedule = ReportSchedule()
+    result = report_schedule._generate_native_filter("", "filter_select", "col", ["x"])
+    assert "" in result
+    assert result[""]["id"] == ""
diff --git a/tests/unit_tests/reports/schemas_test.py b/tests/unit_tests/reports/schemas_test.py
index cf72149070b..a21a332289c 100644
--- a/tests/unit_tests/reports/schemas_test.py
+++ b/tests/unit_tests/reports/schemas_test.py
@@ -19,7 +19,7 @@ import pytest
 from marshmallow import ValidationError
 from pytest_mock import MockerFixture
 
-from superset.reports.schemas import ReportSchedulePostSchema
+from superset.reports.schemas import ReportSchedulePostSchema, ReportSchedulePutSchema
 
 
 def test_report_post_schema_custom_width_validation(mocker: MockerFixture) -> None:
@@ -75,3 +75,100 @@ def test_report_post_schema_custom_width_validation(mocker: MockerFixture) -> No
     assert excinfo.value.messages == {
         "custom_width": ["Screenshot width must be between 100px and 200px"]
     }
+
+
+MINIMAL_POST_PAYLOAD = {
+    "type": "Report",
+    "name": "A report",
+    "crontab": "* * * * *",
+    "timezone": "America/Los_Angeles",
+}
+
+CUSTOM_WIDTH_CONFIG = {
+    "ALERT_REPORTS_MIN_CUSTOM_SCREENSHOT_WIDTH": 600,
+    "ALERT_REPORTS_MAX_CUSTOM_SCREENSHOT_WIDTH": 2400,
+}
+
+
[email protected](
+    "schema_class,payload_base",
+    [
+        (ReportSchedulePostSchema, MINIMAL_POST_PAYLOAD),
+        (ReportSchedulePutSchema, {}),
+    ],
+    ids=["post", "put"],
+)
[email protected](
+    "width,should_pass",
+    [
+        (599, False),
+        (600, True),
+        (2400, True),
+        (2401, False),
+        (None, True),
+    ],
+)
+def test_custom_width_boundary_values(
+    mocker: MockerFixture,
+    schema_class: type,
+    payload_base: dict[str, object],
+    width: int | None,
+    should_pass: bool,
+) -> None:
+    mocker.patch("flask.current_app.config", CUSTOM_WIDTH_CONFIG)
+    schema = schema_class()
+    payload = {**payload_base, "custom_width": width}
+
+    if should_pass:
+        schema.load(payload)
+    else:
+        with pytest.raises(ValidationError) as exc:
+            schema.load(payload)
+        assert "custom_width" in exc.value.messages
+
+
+def test_working_timeout_validation(mocker: MockerFixture) -> None:
+    mocker.patch("flask.current_app.config", CUSTOM_WIDTH_CONFIG)
+    post_schema = ReportSchedulePostSchema()
+    put_schema = ReportSchedulePutSchema()
+
+    # POST: working_timeout=0 and -1 are invalid (min=1)
+    with pytest.raises(ValidationError) as exc:
+        post_schema.load({**MINIMAL_POST_PAYLOAD, "working_timeout": 0})
+    assert "working_timeout" in exc.value.messages
+
+    with pytest.raises(ValidationError) as exc:
+        post_schema.load({**MINIMAL_POST_PAYLOAD, "working_timeout": -1})
+    assert "working_timeout" in exc.value.messages
+
+    # POST: working_timeout=1 is valid
+    post_schema.load({**MINIMAL_POST_PAYLOAD, "working_timeout": 1})
+
+    # PUT: working_timeout=None is valid (allow_none=True)
+    put_schema.load({"working_timeout": None})
+
+
+def test_log_retention_post_vs_put_parity(mocker: MockerFixture) -> None:
+    mocker.patch("flask.current_app.config", CUSTOM_WIDTH_CONFIG)
+    post_schema = ReportSchedulePostSchema()
+    put_schema = ReportSchedulePutSchema()
+
+    # POST: log_retention=0 is invalid (min=1)
+    with pytest.raises(ValidationError) as exc:
+        post_schema.load({**MINIMAL_POST_PAYLOAD, "log_retention": 0})
+    assert "log_retention" in exc.value.messages
+
+    # POST: log_retention=1 is valid
+    post_schema.load({**MINIMAL_POST_PAYLOAD, "log_retention": 1})
+
+    # PUT: log_retention=0 is valid (min=0)
+    put_schema.load({"log_retention": 0})
+
+
+def test_report_type_disallows_database(mocker: MockerFixture) -> None:
+    mocker.patch("flask.current_app.config", CUSTOM_WIDTH_CONFIG)
+    schema = ReportSchedulePostSchema()
+
+    with pytest.raises(ValidationError) as exc:
+        schema.load({**MINIMAL_POST_PAYLOAD, "database": 1})
+    assert "database" in exc.value.messages

Reply via email to