This is an automated email from the ASF dual-hosted git repository.
potiuk pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/airflow.git
The following commit(s) were added to refs/heads/main by this push:
new 48930bc0b4 Refactor: Consolidate import pytest (#34190)
48930bc0b4 is described below
commit 48930bc0b4c159180cc55c8bd079cdfec9cedb21
Author: Miroslav Šedivý <[email protected]>
AuthorDate: Mon Sep 11 13:28:26 2023 +0000
Refactor: Consolidate import pytest (#34190)
---
kubernetes_tests/test_kubernetes_pod_operator.py | 9 +++--
tests/cli/commands/test_db_command.py | 9 +++--
tests/core/test_configuration.py | 5 ++-
tests/core/test_otel_logger.py | 11 ++++--
tests/executors/test_base_executor.py | 3 +-
tests/hooks/test_package_index.py | 14 ++++----
tests/listeners/test_listeners.py | 2 +-
tests/models/test_base.py | 11 +++---
tests/models/test_taskinstance.py | 33 +++++++++---------
tests/providers/amazon/aws/hooks/test_eks.py | 3 +-
tests/providers/amazon/aws/hooks/test_ssm.py | 9 ++---
tests/providers/asana/hooks/test_asana.py | 5 ++-
.../cncf/kubernetes/operators/test_pod.py | 9 +++--
.../kubernetes/test_kubernetes_helper_functions.py | 3 +-
.../cncf/kubernetes/test_pod_generator.py | 25 +++++++-------
.../providers/google/cloud/hooks/test_dataprep.py | 5 ++-
.../google/cloud/log/test_gcs_task_handler.py | 5 +--
.../google/cloud/operators/test_dataflow.py | 2 +-
.../google/cloud/operators/test_datapipeline.py | 2 +-
tests/providers/grpc/hooks/test_grpc.py | 5 ++-
tests/providers/microsoft/azure/hooks/test_adx.py | 5 ++-
.../azure/hooks/test_azure_data_factory.py | 9 +++--
.../microsoft/azure/hooks/test_azure_synapse.py | 3 +-
tests/providers/microsoft/psrp/hooks/test_psrp.py | 3 +-
.../providers/salesforce/hooks/test_salesforce.py | 5 ++-
tests/providers/slack/hooks/test_slack.py | 7 ++--
tests/providers/slack/hooks/test_slack_webhook.py | 7 ++--
tests/providers/yandex/hooks/test_yandex.py | 5 ++-
tests/ti_deps/deps/test_trigger_rule_dep.py | 23 ++++++-------
tests/utils/test_dates.py | 9 +++--
tests/utils/test_db_cleanup.py | 39 +++++++++++-----------
tests/utils/test_sqlalchemy.py | 9 +++--
tests/www/views/test_views_connection.py | 7 ++--
33 files changed, 141 insertions(+), 160 deletions(-)
diff --git a/kubernetes_tests/test_kubernetes_pod_operator.py
b/kubernetes_tests/test_kubernetes_pod_operator.py
index 7534cec5f0..899bcf1f6f 100644
--- a/kubernetes_tests/test_kubernetes_pod_operator.py
+++ b/kubernetes_tests/test_kubernetes_pod_operator.py
@@ -33,7 +33,6 @@ from kubernetes import client
from kubernetes.client import V1EnvVar, V1PodSecurityContext,
V1SecurityContext, models as k8s
from kubernetes.client.api_client import ApiClient
from kubernetes.client.rest import ApiException
-from pytest import param
from airflow.exceptions import AirflowException, AirflowSkipException
from airflow.models import DAG, Connection, DagRun, TaskInstance
@@ -395,7 +394,7 @@ class TestKubernetesPodOperatorSystem:
@pytest.mark.parametrize(
"val",
[
- param(
+ pytest.param(
k8s.V1Affinity(
node_affinity=k8s.V1NodeAffinity(
required_during_scheduling_ignored_during_execution=k8s.V1NodeSelector(
@@ -415,7 +414,7 @@ class TestKubernetesPodOperatorSystem:
),
id="current",
),
- param(
+ pytest.param(
{
"nodeAffinity": {
"requiredDuringSchedulingIgnoredDuringExecution": {
@@ -729,8 +728,8 @@ class TestKubernetesPodOperatorSystem:
@pytest.mark.parametrize(
"env_vars",
[
- param([k8s.V1EnvVar(name="env_name", value="value")],
id="current"),
- param({"env_name": "value"}, id="backcompat"), # todo: remove?
+ pytest.param([k8s.V1EnvVar(name="env_name", value="value")],
id="current"),
+ pytest.param({"env_name": "value"}, id="backcompat"), # todo:
remove?
],
)
def test_pod_template_file_with_overrides_system(self, env_vars,
test_label, mock_get_connection):
diff --git a/tests/cli/commands/test_db_command.py
b/tests/cli/commands/test_db_command.py
index 4bfb80d4f5..b9809eaeba 100644
--- a/tests/cli/commands/test_db_command.py
+++ b/tests/cli/commands/test_db_command.py
@@ -21,7 +21,6 @@ from unittest.mock import MagicMock, Mock, call, patch
import pendulum
import pytest
-from pytest import param
from sqlalchemy.engine.url import make_url
from sqlalchemy.exc import OperationalError
@@ -102,18 +101,18 @@ class TestCliDb:
@pytest.mark.parametrize(
"args, pattern",
[
- param(["--to-version", "2.1.25"], "not supported", id="bad
version"),
- param(
+ pytest.param(["--to-version", "2.1.25"], "not supported", id="bad
version"),
+ pytest.param(
["--to-revision", "abc", "--from-revision", "abc123"],
"used with `--show-sql-only`",
id="requires offline",
),
- param(
+ pytest.param(
["--to-revision", "abc", "--from-version", "2.0.2"],
"used with `--show-sql-only`",
id="requires offline",
),
- param(
+ pytest.param(
["--to-revision", "abc", "--from-version", "2.1.25",
"--show-sql-only"],
"Unknown version",
id="bad version",
diff --git a/tests/core/test_configuration.py b/tests/core/test_configuration.py
index eb66c255b8..2863899dc8 100644
--- a/tests/core/test_configuration.py
+++ b/tests/core/test_configuration.py
@@ -28,7 +28,6 @@ from unittest import mock
from unittest.mock import patch
import pytest
-from pytest import param
from airflow import configuration
from airflow.configuration import (
@@ -1482,8 +1481,8 @@ sql_alchemy_conn=sqlite://test
@pytest.mark.parametrize(
"key",
[
- param("deactivate_stale_dags_interval", id="old"),
- param("parsing_cleanup_interval", id="new"),
+ pytest.param("deactivate_stale_dags_interval", id="old"),
+ pytest.param("parsing_cleanup_interval", id="new"),
],
)
def test_future_warning_only_for_code_ref(self, key):
diff --git a/tests/core/test_otel_logger.py b/tests/core/test_otel_logger.py
index 1f04edd1bf..130d84265c 100644
--- a/tests/core/test_otel_logger.py
+++ b/tests/core/test_otel_logger.py
@@ -23,7 +23,6 @@ from unittest.mock import ANY
import pytest
from opentelemetry.metrics import MeterProvider
-from pytest import param
from airflow.exceptions import InvalidStatsNameException
from airflow.metrics.otel_logger import (
@@ -76,8 +75,14 @@ class TestOtelMetrics:
@pytest.mark.parametrize(
"invalid_stat_combo",
[
- *[param(("prefix", name), id=f"Stat name {msg}.") for (name, msg)
in INVALID_STAT_NAME_CASES],
- *[param((prefix, "name"), id=f"Stat prefix {msg}.") for (prefix,
msg) in INVALID_STAT_NAME_CASES],
+ *[
+ pytest.param(("prefix", name), id=f"Stat name {msg}.")
+ for (name, msg) in INVALID_STAT_NAME_CASES
+ ],
+ *[
+ pytest.param((prefix, "name"), id=f"Stat prefix {msg}.")
+ for (prefix, msg) in INVALID_STAT_NAME_CASES
+ ],
],
)
def test_invalid_stat_names_are_caught(self, invalid_stat_combo):
diff --git a/tests/executors/test_base_executor.py
b/tests/executors/test_base_executor.py
index 7d7f21ac0d..7c21949b53 100644
--- a/tests/executors/test_base_executor.py
+++ b/tests/executors/test_base_executor.py
@@ -23,7 +23,6 @@ from unittest import mock
import pendulum
import pytest
import time_machine
-from pytest import mark
from airflow.executors.base_executor import BaseExecutor,
RunningRetryAttemptType
from airflow.models.baseoperator import BaseOperator
@@ -132,7 +131,7 @@ def setup_trigger_tasks(dag_maker):
return executor, dagrun
[email protected]("open_slots", [1, 2, 3])
[email protected]("open_slots", [1, 2, 3])
def test_trigger_queued_tasks(dag_maker, open_slots):
executor, _ = setup_trigger_tasks(dag_maker)
executor.trigger_tasks(open_slots)
diff --git a/tests/hooks/test_package_index.py
b/tests/hooks/test_package_index.py
index 375898eb06..169d6cce5d 100644
--- a/tests/hooks/test_package_index.py
+++ b/tests/hooks/test_package_index.py
@@ -18,7 +18,7 @@
"""Test for Package Index Hook."""
from __future__ import annotations
-from pytest import FixtureRequest, MonkeyPatch, fixture, mark, raises
+import pytest
from airflow.hooks.package_index import PackageIndexHook
from airflow.models.connection import Connection
@@ -60,11 +60,11 @@ PI_MOCK_TESTDATA = {
}
-@fixture(
[email protected](
params=list(PI_MOCK_TESTDATA.values()),
ids=list(PI_MOCK_TESTDATA.keys()),
)
-def mock_get_connection(monkeypatch: MonkeyPatch, request: FixtureRequest) ->
str | None:
+def mock_get_connection(monkeypatch: pytest.MonkeyPatch, request:
pytest.FixtureRequest) -> str | None:
"""Pytest Fixture."""
testdata: dict[str, str | None] = request.param
host: str | None = testdata.get("host", None)
@@ -86,12 +86,12 @@ def test_get_connection_url(mock_get_connection: str |
None):
connection_url = hook_instance.get_connection_url()
assert connection_url == expected_result
else:
- with raises(Exception):
+ with pytest.raises(Exception):
hook_instance.get_connection_url()
[email protected]("success", [0, 1])
-def test_test_connection(monkeypatch: MonkeyPatch, mock_get_connection: str |
None, success: int):
[email protected]("success", [0, 1])
+def test_test_connection(monkeypatch: pytest.MonkeyPatch, mock_get_connection:
str | None, success: int):
"""Test if connection test responds correctly to return code."""
def mock_run(*_, **__):
@@ -110,7 +110,7 @@ def test_test_connection(monkeypatch: MonkeyPatch,
mock_get_connection: str | No
result = hook_instance.test_connection()
assert result[0] == (success == 0)
else:
- with raises(Exception):
+ with pytest.raises(Exception):
hook_instance.test_connection()
diff --git a/tests/listeners/test_listeners.py
b/tests/listeners/test_listeners.py
index ac147d08f1..166c3a5f03 100644
--- a/tests/listeners/test_listeners.py
+++ b/tests/listeners/test_listeners.py
@@ -19,7 +19,7 @@ from __future__ import annotations
import contextlib
import os
-import pytest as pytest
+import pytest
from airflow import AirflowException
from airflow.jobs.job import Job, run_job
diff --git a/tests/models/test_base.py b/tests/models/test_base.py
index 12047062d5..8afe421817 100644
--- a/tests/models/test_base.py
+++ b/tests/models/test_base.py
@@ -17,7 +17,6 @@
from __future__ import annotations
import pytest
-from pytest import param
from airflow.models.base import get_id_collation_args
from tests.test_utils.config import conf_vars
@@ -26,16 +25,16 @@ from tests.test_utils.config import conf_vars
@pytest.mark.parametrize(
("dsn", "expected", "extra"),
[
- param("postgresql://host/the_database", {}, {}, id="postgres"),
- param("mysql://host/the_database", {"collation": "utf8mb3_bin"}, {},
id="mysql"),
- param("mysql+pymsql://host/the_database", {"collation":
"utf8mb3_bin"}, {}, id="mysql+pymsql"),
- param(
+ pytest.param("postgresql://host/the_database", {}, {}, id="postgres"),
+ pytest.param("mysql://host/the_database", {"collation":
"utf8mb3_bin"}, {}, id="mysql"),
+ pytest.param("mysql+pymsql://host/the_database", {"collation":
"utf8mb3_bin"}, {}, id="mysql+pymsql"),
+ pytest.param(
"mysql://host/the_database",
{"collation": "ascii"},
{("database", "sql_engine_collation_for_ids"): "ascii"},
id="mysql with explicit config",
),
- param(
+ pytest.param(
"postgresql://host/the_database",
{"collation": "ascii"},
{("database", "sql_engine_collation_for_ids"): "ascii"},
diff --git a/tests/models/test_taskinstance.py
b/tests/models/test_taskinstance.py
index a7698fc236..be0cd69967 100644
--- a/tests/models/test_taskinstance.py
+++ b/tests/models/test_taskinstance.py
@@ -35,7 +35,6 @@ from uuid import uuid4
import pendulum
import pytest
import time_machine
-from pytest import param
from airflow import models, settings
from airflow.decorators import task, task_group
@@ -1165,7 +1164,7 @@ class TestTaskInstance:
# expect_passed
# states: success, skipped, failed, upstream_failed, removed,
done, success_setup, skipped_setup
# all setups succeeded - one
- param(
+ pytest.param(
"all_done_setup_success",
1,
_UpstreamTIStates(6, 0, 0, 0, 0, 6, 1, 0),
@@ -1174,7 +1173,7 @@ class TestTaskInstance:
True,
id="all setups succeeded - one",
),
- param(
+ pytest.param(
"all_done_setup_success",
2,
_UpstreamTIStates(7, 0, 0, 0, 0, 7, 2, 0),
@@ -1183,7 +1182,7 @@ class TestTaskInstance:
True,
id="all setups succeeded - two",
),
- param(
+ pytest.param(
"all_done_setup_success",
1,
_UpstreamTIStates(5, 0, 1, 0, 0, 6, 0, 0),
@@ -1192,7 +1191,7 @@ class TestTaskInstance:
False,
id="setups failed - one",
),
- param(
+ pytest.param(
"all_done_setup_success",
2,
_UpstreamTIStates(5, 0, 2, 0, 0, 7, 0, 0),
@@ -1201,7 +1200,7 @@ class TestTaskInstance:
False,
id="setups failed - two",
),
- param(
+ pytest.param(
"all_done_setup_success",
1,
_UpstreamTIStates(5, 1, 0, 0, 0, 6, 0, 1),
@@ -1210,7 +1209,7 @@ class TestTaskInstance:
False,
id="setups skipped - one",
),
- param(
+ pytest.param(
"all_done_setup_success",
2,
_UpstreamTIStates(5, 2, 0, 0, 0, 7, 0, 2),
@@ -1219,7 +1218,7 @@ class TestTaskInstance:
False,
id="setups skipped - two",
),
- param(
+ pytest.param(
"all_done_setup_success",
2,
_UpstreamTIStates(5, 1, 1, 0, 0, 7, 0, 1),
@@ -1228,7 +1227,7 @@ class TestTaskInstance:
False,
id="one setup failed one setup skipped",
),
- param(
+ pytest.param(
"all_done_setup_success",
2,
_UpstreamTIStates(6, 0, 1, 0, 0, 7, 1, 0),
@@ -1237,7 +1236,7 @@ class TestTaskInstance:
True,
id="is teardown one setup failed one setup success",
),
- param(
+ pytest.param(
"all_done_setup_success",
2,
_UpstreamTIStates(6, 0, 1, 0, 0, 7, 1, 0),
@@ -1246,7 +1245,7 @@ class TestTaskInstance:
True,
id="not teardown one setup failed one setup success",
),
- param(
+ pytest.param(
"all_done_setup_success",
2,
_UpstreamTIStates(6, 1, 0, 0, 0, 7, 1, 1),
@@ -1255,7 +1254,7 @@ class TestTaskInstance:
True,
id="is teardown one setup success one setup skipped",
),
- param(
+ pytest.param(
"all_done_setup_success",
2,
_UpstreamTIStates(6, 1, 0, 0, 0, 7, 1, 1),
@@ -1264,7 +1263,7 @@ class TestTaskInstance:
True,
id="not teardown one setup success one setup skipped",
),
- param(
+ pytest.param(
"all_done_setup_success",
1,
_UpstreamTIStates(3, 0, 0, 0, 0, 3, 1, 0),
@@ -1273,7 +1272,7 @@ class TestTaskInstance:
False,
id="not all done",
),
- param(
+ pytest.param(
"all_done_setup_success",
1,
_UpstreamTIStates(3, 0, 1, 0, 0, 4, 1, 0),
@@ -1282,7 +1281,7 @@ class TestTaskInstance:
False,
id="is teardown not all done one failed",
),
- param(
+ pytest.param(
"all_done_setup_success",
1,
_UpstreamTIStates(3, 0, 1, 0, 0, 4, 1, 0),
@@ -1291,7 +1290,7 @@ class TestTaskInstance:
False,
id="not teardown not all done one failed",
),
- param(
+ pytest.param(
"all_done_setup_success",
1,
_UpstreamTIStates(3, 1, 0, 0, 0, 4, 1, 0),
@@ -1300,7 +1299,7 @@ class TestTaskInstance:
False,
id="not all done one skipped",
),
- param(
+ pytest.param(
"all_done_setup_success",
1,
_UpstreamTIStates(3, 1, 0, 0, 0, 4, 1, 0),
diff --git a/tests/providers/amazon/aws/hooks/test_eks.py
b/tests/providers/amazon/aws/hooks/test_eks.py
index d842a69a61..8d8aa9f6f8 100644
--- a/tests/providers/amazon/aws/hooks/test_eks.py
+++ b/tests/providers/amazon/aws/hooks/test_eks.py
@@ -97,7 +97,6 @@ if TYPE_CHECKING:
from datetime import datetime
from moto.core.exceptions import AWSError
- from pytest import ExceptionInfo
@pytest.fixture(scope="function")
@@ -1332,7 +1331,7 @@ def assert_all_arn_values_are_valid(expected_arn_values,
pattern, arn_under_test
def assert_client_error_exception_thrown(
- expected_exception: type[AWSError], expected_msg: str, raised_exception:
ExceptionInfo
+ expected_exception: type[AWSError], expected_msg: str, raised_exception:
pytest.ExceptionInfo
) -> None:
"""
Asserts that the raised exception is of the expected type
diff --git a/tests/providers/amazon/aws/hooks/test_ssm.py
b/tests/providers/amazon/aws/hooks/test_ssm.py
index 8ae1e97fc8..f47e0a5d71 100644
--- a/tests/providers/amazon/aws/hooks/test_ssm.py
+++ b/tests/providers/amazon/aws/hooks/test_ssm.py
@@ -22,7 +22,6 @@ from unittest import mock
import botocore.exceptions
import pytest
from moto import mock_ssm
-from pytest import param
from airflow.providers.amazon.aws.hooks.ssm import SsmHook
@@ -60,9 +59,11 @@ class TestSsmHook:
@pytest.mark.parametrize(
"param_name, default_value, expected_result",
[
- param(EXISTING_PARAM_NAME, None, PARAM_VALUE,
id="param_exists_no_default_provided"),
- param(EXISTING_PARAM_NAME, DEFAULT_VALUE, PARAM_VALUE,
id="param_exists_with_default"),
- param(BAD_PARAM_NAME, DEFAULT_VALUE, DEFAULT_VALUE,
id="param_does_not_exist_uses_default"),
+ pytest.param(EXISTING_PARAM_NAME, None, PARAM_VALUE,
id="param_exists_no_default_provided"),
+ pytest.param(EXISTING_PARAM_NAME, DEFAULT_VALUE, PARAM_VALUE,
id="param_exists_with_default"),
+ pytest.param(
+ BAD_PARAM_NAME, DEFAULT_VALUE, DEFAULT_VALUE,
id="param_does_not_exist_uses_default"
+ ),
],
)
def test_get_parameter_value_happy_cases(self, param_name, default_value,
expected_result) -> None:
diff --git a/tests/providers/asana/hooks/test_asana.py
b/tests/providers/asana/hooks/test_asana.py
index 7b7855b800..b2412320ea 100644
--- a/tests/providers/asana/hooks/test_asana.py
+++ b/tests/providers/asana/hooks/test_asana.py
@@ -21,7 +21,6 @@ from unittest.mock import patch
import pytest
from asana import Client
-from pytest import param
from airflow.models import Connection
from airflow.providers.asana.hooks.asana import AsanaHook
@@ -254,11 +253,11 @@ class TestAsanaHook:
@pytest.mark.parametrize(
"uri",
[
- param(
+ pytest.param(
"a://?extra__asana__workspace=abc&extra__asana__project=abc",
id="prefix",
),
- param("a://?workspace=abc&project=abc", id="no-prefix"),
+ pytest.param("a://?workspace=abc&project=abc", id="no-prefix"),
],
)
def test_backcompat_prefix_works(self, uri):
diff --git a/tests/providers/cncf/kubernetes/operators/test_pod.py
b/tests/providers/cncf/kubernetes/operators/test_pod.py
index fde4e60159..72254e231b 100644
--- a/tests/providers/cncf/kubernetes/operators/test_pod.py
+++ b/tests/providers/cncf/kubernetes/operators/test_pod.py
@@ -24,7 +24,6 @@ from unittest.mock import MagicMock, patch
import pendulum
import pytest
from kubernetes.client import ApiClient, V1PodSecurityContext, V1PodStatus,
models as k8s
-from pytest import param
from urllib3 import HTTPResponse
from urllib3.packages.six import BytesIO
@@ -205,8 +204,8 @@ class TestKubernetesPodOperator:
@pytest.mark.parametrize(
"input",
[
- param([k8s.V1EnvVar(name="{{ bar }}", value="{{ foo }}")],
id="current"),
- param({"{{ bar }}": "{{ foo }}"}, id="backcompat"),
+ pytest.param([k8s.V1EnvVar(name="{{ bar }}", value="{{ foo }}")],
id="current"),
+ pytest.param({"{{ bar }}": "{{ foo }}"}, id="backcompat"),
],
)
def test_env_vars(self, input):
@@ -377,8 +376,8 @@ class TestKubernetesPodOperator:
@pytest.mark.parametrize(
"val",
[
- param([k8s.V1LocalObjectReference("fakeSecret")], id="current"),
- param("fakeSecret", id="backcompat"),
+ pytest.param([k8s.V1LocalObjectReference("fakeSecret")],
id="current"),
+ pytest.param("fakeSecret", id="backcompat"),
],
)
def test_image_pull_secrets_correctly_set(self, val):
diff --git
a/tests/providers/cncf/kubernetes/test_kubernetes_helper_functions.py
b/tests/providers/cncf/kubernetes/test_kubernetes_helper_functions.py
index d90ac92b6c..4f97f0a1da 100644
--- a/tests/providers/cncf/kubernetes/test_kubernetes_helper_functions.py
+++ b/tests/providers/cncf/kubernetes/test_kubernetes_helper_functions.py
@@ -20,7 +20,6 @@ from __future__ import annotations
import re
import pytest
-from pytest import param
from airflow.providers.cncf.kubernetes.kubernetes_helper_functions import
create_pod_id
from airflow.providers.cncf.kubernetes.operators.pod import _create_pod_id
@@ -32,7 +31,7 @@ pod_name_regex =
r"^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])
# we added this function to provider temporarily until min airflow version
catches up
# meanwhile, we use this one test to test both core and provider
@pytest.mark.parametrize(
- "create_pod_id", [param(_create_pod_id, id="provider"),
param(create_pod_id, id="core")]
+ "create_pod_id", [pytest.param(_create_pod_id, id="provider"),
pytest.param(create_pod_id, id="core")]
)
class TestCreatePodId:
@pytest.mark.parametrize(
diff --git a/tests/providers/cncf/kubernetes/test_pod_generator.py
b/tests/providers/cncf/kubernetes/test_pod_generator.py
index b2c16177cb..2739781858 100644
--- a/tests/providers/cncf/kubernetes/test_pod_generator.py
+++ b/tests/providers/cncf/kubernetes/test_pod_generator.py
@@ -26,7 +26,6 @@ import pendulum
import pytest
from dateutil import parser
from kubernetes.client import ApiClient, models as k8s
-from pytest import param
from airflow import __version__
from airflow.exceptions import AirflowConfigException
@@ -398,8 +397,8 @@ class TestPodGenerator:
@pytest.mark.parametrize(
"config_image, expected_image",
[
- param("my_image:my_tag", "my_image:my_tag", id="image_in_cfg"),
- param(None, "busybox", id="no_image_in_cfg"),
+ pytest.param("my_image:my_tag", "my_image:my_tag",
id="image_in_cfg"),
+ pytest.param(None, "busybox", id="no_image_in_cfg"),
],
)
def test_construct_pod(self, config_image, expected_image):
@@ -721,10 +720,10 @@ class TestPodGenerator:
@pytest.mark.parametrize(
"input",
(
- param("a" * 70, id="max_label_length"),
- param("a" * 253, id="max_subdomain_length"),
- param("a" * 95, id="close to max"),
- param("aaa", id="tiny"),
+ pytest.param("a" * 70, id="max_label_length"),
+ pytest.param("a" * 253, id="max_subdomain_length"),
+ pytest.param("a" * 95, id="close to max"),
+ pytest.param("aaa", id="tiny"),
),
)
def test_pod_name_confirm_to_max_length(self, input):
@@ -780,16 +779,16 @@ class TestPodGenerator:
@pytest.mark.parametrize(
"extra, extra_expected",
[
- param(dict(), {}, id="base"),
- param(dict(airflow_worker=2), {"airflow-worker": "2"},
id="worker"),
- param(dict(map_index=2), {"map_index": "2"}, id="map_index"),
- param(dict(run_id="2"), {"run_id": "2"}, id="run_id"),
- param(
+ pytest.param(dict(), {}, id="base"),
+ pytest.param(dict(airflow_worker=2), {"airflow-worker": "2"},
id="worker"),
+ pytest.param(dict(map_index=2), {"map_index": "2"},
id="map_index"),
+ pytest.param(dict(run_id="2"), {"run_id": "2"}, id="run_id"),
+ pytest.param(
dict(execution_date=now),
{"execution_date": datetime_to_label_safe_datestring(now)},
id="date",
),
- param(
+ pytest.param(
dict(airflow_worker=2, map_index=2, run_id="2",
execution_date=now),
{
"airflow-worker": "2",
diff --git a/tests/providers/google/cloud/hooks/test_dataprep.py
b/tests/providers/google/cloud/hooks/test_dataprep.py
index 7c132b9041..a6634162e9 100644
--- a/tests/providers/google/cloud/hooks/test_dataprep.py
+++ b/tests/providers/google/cloud/hooks/test_dataprep.py
@@ -23,7 +23,6 @@ from unittest import mock
from unittest.mock import patch
import pytest
-from pytest import param
from requests import HTTPError
from tenacity import RetryError
@@ -262,8 +261,8 @@ class TestGoogleDataprepHook:
@pytest.mark.parametrize(
"uri",
[
-
param("a://?extra__dataprep__token=abc&extra__dataprep__base_url=abc",
id="prefix"),
- param("a://?token=abc&base_url=abc", id="no-prefix"),
+
pytest.param("a://?extra__dataprep__token=abc&extra__dataprep__base_url=abc",
id="prefix"),
+ pytest.param("a://?token=abc&base_url=abc", id="no-prefix"),
],
)
def test_conn_extra_backcompat_prefix(self, uri):
diff --git a/tests/providers/google/cloud/log/test_gcs_task_handler.py
b/tests/providers/google/cloud/log/test_gcs_task_handler.py
index ead895a334..95cbcb81fb 100644
--- a/tests/providers/google/cloud/log/test_gcs_task_handler.py
+++ b/tests/providers/google/cloud/log/test_gcs_task_handler.py
@@ -23,7 +23,6 @@ from unittest import mock
from unittest.mock import MagicMock
import pytest
-from pytest import param
from airflow.providers.google.cloud.log.gcs_task_handler import GCSTaskHandler
from airflow.utils.state import TaskInstanceState
@@ -63,7 +62,9 @@ class TestGCSTaskHandler:
@mock.patch("airflow.providers.google.cloud.log.gcs_task_handler.GCSHook")
@mock.patch("google.cloud.storage.Client")
@mock.patch("airflow.providers.google.cloud.log.gcs_task_handler.get_credentials_and_project_id")
- @pytest.mark.parametrize("conn_id", [param("", id="no-conn"),
param("my_gcs_conn", id="with-conn")])
+ @pytest.mark.parametrize(
+ "conn_id", [pytest.param("", id="no-conn"),
pytest.param("my_gcs_conn", id="with-conn")]
+ )
def test_client_conn_id_behavior(self, mock_get_cred, mock_client,
mock_hook, conn_id):
"""When remote log conn id configured, hook will be used"""
mock_hook.return_value.get_credentials_and_project_id.return_value =
("test_cred", "test_proj")
diff --git a/tests/providers/google/cloud/operators/test_dataflow.py
b/tests/providers/google/cloud/operators/test_dataflow.py
index 301a69962e..8c0970b55c 100644
--- a/tests/providers/google/cloud/operators/test_dataflow.py
+++ b/tests/providers/google/cloud/operators/test_dataflow.py
@@ -21,7 +21,7 @@ import copy
from copy import deepcopy
from unittest import mock
-import pytest as pytest
+import pytest
import airflow
from airflow.providers.google.cloud.hooks.dataflow import DataflowJobStatus
diff --git a/tests/providers/google/cloud/operators/test_datapipeline.py
b/tests/providers/google/cloud/operators/test_datapipeline.py
index eab6e4cf23..6f0af0a848 100644
--- a/tests/providers/google/cloud/operators/test_datapipeline.py
+++ b/tests/providers/google/cloud/operators/test_datapipeline.py
@@ -19,7 +19,7 @@ from __future__ import annotations
from unittest import mock
-import pytest as pytest
+import pytest
from airflow.exceptions import AirflowException
from airflow.providers.google.cloud.operators.datapipeline import (
diff --git a/tests/providers/grpc/hooks/test_grpc.py
b/tests/providers/grpc/hooks/test_grpc.py
index 46ae9896e7..8536188bb1 100644
--- a/tests/providers/grpc/hooks/test_grpc.py
+++ b/tests/providers/grpc/hooks/test_grpc.py
@@ -22,7 +22,6 @@ from unittest import mock
from unittest.mock import patch
import pytest
-from pytest import param
from airflow.exceptions import AirflowConfigException
from airflow.models import Connection
@@ -272,11 +271,11 @@ class TestGrpcHook:
@pytest.mark.parametrize(
"uri",
[
- param(
+ pytest.param(
"a://abc:50?extra__grpc__auth_type=NO_AUTH",
id="prefix",
),
- param("a://abc:50?auth_type=NO_AUTH", id="no-prefix"),
+ pytest.param("a://abc:50?auth_type=NO_AUTH", id="no-prefix"),
],
)
@patch("airflow.providers.grpc.hooks.grpc.grpc.insecure_channel")
diff --git a/tests/providers/microsoft/azure/hooks/test_adx.py
b/tests/providers/microsoft/azure/hooks/test_adx.py
index 2268f8b8a6..827d8b8a5b 100644
--- a/tests/providers/microsoft/azure/hooks/test_adx.py
+++ b/tests/providers/microsoft/azure/hooks/test_adx.py
@@ -21,7 +21,6 @@ from unittest import mock
import pytest
from azure.kusto.data import ClientRequestProperties, KustoClient,
KustoConnectionStringBuilder
-from pytest import param
from airflow.exceptions import AirflowException
from airflow.models import Connection
@@ -269,12 +268,12 @@ class TestAzureDataExplorerHook:
@pytest.mark.parametrize(
"mocked_connection",
[
- param(
+ pytest.param(
"a://usr:pw@host?extra__azure_data_explorer__tenant=my-tenant"
"&extra__azure_data_explorer__auth_method=AAD_APP",
id="prefix",
),
- param("a://usr:pw@host?tenant=my-tenant&auth_method=AAD_APP",
id="no-prefix"),
+
pytest.param("a://usr:pw@host?tenant=my-tenant&auth_method=AAD_APP",
id="no-prefix"),
],
indirect=True,
)
diff --git a/tests/providers/microsoft/azure/hooks/test_azure_data_factory.py
b/tests/providers/microsoft/azure/hooks/test_azure_data_factory.py
index 63a22614dc..508bc2ee78 100644
--- a/tests/providers/microsoft/azure/hooks/test_azure_data_factory.py
+++ b/tests/providers/microsoft/azure/hooks/test_azure_data_factory.py
@@ -24,7 +24,6 @@ import pytest
from azure.identity import ClientSecretCredential, DefaultAzureCredential
from azure.mgmt.datafactory.aio import DataFactoryManagementClient
from azure.mgmt.datafactory.models import FactoryListResponse
-from pytest import param
from airflow.exceptions import AirflowException
from airflow.models.connection import Connection
@@ -661,12 +660,12 @@ def test_connection_failure_missing_tenant_id():
@pytest.mark.parametrize(
"uri",
[
- param(
+ pytest.param(
"a://?extra__azure_data_factory__resource_group_name=abc"
"&extra__azure_data_factory__factory_name=abc",
id="prefix",
),
- param("a://?resource_group_name=abc&factory_name=abc", id="no-prefix"),
+ pytest.param("a://?resource_group_name=abc&factory_name=abc",
id="no-prefix"),
],
)
@patch("airflow.providers.microsoft.azure.hooks.data_factory.AzureDataFactoryHook.get_conn")
@@ -680,12 +679,12 @@ def
test_provide_targeted_factory_backcompat_prefix_works(mock_connect, uri):
@pytest.mark.parametrize(
"uri",
[
- param(
+ pytest.param(
"a://hi:yo@?extra__azure_data_factory__tenantId=ten"
"&extra__azure_data_factory__subscriptionId=sub",
id="prefix",
),
- param("a://hi:yo@?tenantId=ten&subscriptionId=sub", id="no-prefix"),
+ pytest.param("a://hi:yo@?tenantId=ten&subscriptionId=sub",
id="no-prefix"),
],
)
@patch("airflow.providers.microsoft.azure.hooks.data_factory.ClientSecretCredential")
diff --git a/tests/providers/microsoft/azure/hooks/test_azure_synapse.py
b/tests/providers/microsoft/azure/hooks/test_azure_synapse.py
index b63dacc6da..ae3291fbd4 100644
--- a/tests/providers/microsoft/azure/hooks/test_azure_synapse.py
+++ b/tests/providers/microsoft/azure/hooks/test_azure_synapse.py
@@ -21,7 +21,6 @@ from unittest.mock import MagicMock, patch
import pytest
from azure.identity import ClientSecretCredential, DefaultAzureCredential
from azure.synapse.spark import SparkClient
-from pytest import fixture
from airflow.models.connection import Connection
from airflow.providers.microsoft.azure.hooks.synapse import AzureSynapseHook,
AzureSynapseSparkBatchRunStatus
@@ -80,7 +79,7 @@ def setup_connections(create_mock_connections):
)
-@fixture
[email protected]
def hook():
client =
AzureSynapseHook(azure_synapse_conn_id=DEFAULT_CONNECTION_CLIENT_SECRET)
client._conn = MagicMock(spec=["spark_batch"])
diff --git a/tests/providers/microsoft/psrp/hooks/test_psrp.py
b/tests/providers/microsoft/psrp/hooks/test_psrp.py
index 563375e958..de998387ef 100644
--- a/tests/providers/microsoft/psrp/hooks/test_psrp.py
+++ b/tests/providers/microsoft/psrp/hooks/test_psrp.py
@@ -24,7 +24,6 @@ import pytest
from pypsrp.host import PSHost
from pypsrp.messages import MessageType
from pypsrp.powershell import PSInvocationState
-from pytest import raises
from airflow.exceptions import AirflowException
from airflow.models import Connection
@@ -124,7 +123,7 @@ class TestPsrpHook:
return conn
hook.get_connection = get_connection
- with raises(AirflowException, match="Unexpected extra configuration
keys: foo"):
+ with pytest.raises(AirflowException, match="Unexpected extra
configuration keys: foo"):
hook.get_conn()
@pytest.mark.parametrize(
diff --git a/tests/providers/salesforce/hooks/test_salesforce.py
b/tests/providers/salesforce/hooks/test_salesforce.py
index 2c437fdcb2..aa3b03a38d 100644
--- a/tests/providers/salesforce/hooks/test_salesforce.py
+++ b/tests/providers/salesforce/hooks/test_salesforce.py
@@ -24,7 +24,6 @@ from unittest.mock import Mock, patch
import numpy as np
import pandas as pd
import pytest
-from pytest import param
from requests import Session as request_session
from simple_salesforce import Salesforce, api
@@ -458,11 +457,11 @@ class TestSalesforceHook:
@pytest.mark.parametrize(
"uri",
[
- param(
+ pytest.param(
"a://?extra__salesforce__security_token=token&extra__salesforce__domain=domain",
id="prefix",
),
- param("a://?security_token=token&domain=domain", id="no-prefix"),
+ pytest.param("a://?security_token=token&domain=domain",
id="no-prefix"),
],
)
@patch("airflow.providers.salesforce.hooks.salesforce.Salesforce")
diff --git a/tests/providers/slack/hooks/test_slack.py
b/tests/providers/slack/hooks/test_slack.py
index 43a03f40f9..2156fdaa18 100644
--- a/tests/providers/slack/hooks/test_slack.py
+++ b/tests/providers/slack/hooks/test_slack.py
@@ -24,7 +24,6 @@ from unittest import mock
from unittest.mock import patch
import pytest
-from pytest import MonkeyPatch, param
from slack_sdk.errors import SlackApiError
from slack_sdk.http_retry.builtin_handlers import ConnectionErrorRetryHandler,
RateLimitErrorRetryHandler
from slack_sdk.web.slack_response import SlackResponse
@@ -69,7 +68,7 @@ def slack_api_connections():
),
]
- with MonkeyPatch.context() as mp:
+ with pytest.MonkeyPatch.context() as mp:
for conn in connections:
mp.setenv(f"AIRFLOW_CONN_{conn.conn_id.upper()}", conn.get_uri())
yield
@@ -435,13 +434,13 @@ class TestSlackHook:
@pytest.mark.parametrize(
"uri",
[
- param(
+ pytest.param(
"a://:abc@?extra__slack__timeout=123"
"&extra__slack__base_url=base_url"
"&extra__slack__proxy=proxy",
id="prefix",
),
- param("a://:abc@?timeout=123&base_url=base_url&proxy=proxy", id="no-prefix"),
+ pytest.param("a://:abc@?timeout=123&base_url=base_url&proxy=proxy", id="no-prefix"),
],
)
def test_backcompat_prefix_works(self, uri):
diff --git a/tests/providers/slack/hooks/test_slack_webhook.py b/tests/providers/slack/hooks/test_slack_webhook.py
index 9b3ff8e093..3a12f643a3 100644
--- a/tests/providers/slack/hooks/test_slack_webhook.py
+++ b/tests/providers/slack/hooks/test_slack_webhook.py
@@ -26,7 +26,6 @@ from unittest import mock
from unittest.mock import patch
import pytest
-from pytest import MonkeyPatch, param
from slack_sdk.http_retry.builtin_handlers import ConnectionErrorRetryHandler, RateLimitErrorRetryHandler
from slack_sdk.webhook.webhook_response import WebhookResponse
@@ -126,7 +125,7 @@ def slack_webhook_connections():
host="some.netloc",
),
]
- with MonkeyPatch.context() as mp:
+ with pytest.MonkeyPatch.context() as mp:
for conn in connections:
mp.setenv(f"AIRFLOW_CONN_{conn.conn_id.upper()}", conn.get_uri())
yield
@@ -479,11 +478,11 @@ class TestSlackWebhookHook:
@pytest.mark.parametrize(
"uri",
[
- param(
+ pytest.param(
"a://:abc@?extra__slackwebhook__timeout=123&extra__slackwebhook__proxy=proxy",
id="prefix",
),
- param("a://:abc@?timeout=123&proxy=proxy", id="no-prefix"),
+ pytest.param("a://:abc@?timeout=123&proxy=proxy", id="no-prefix"),
],
)
def test_backcompat_prefix_works(self, uri):
diff --git a/tests/providers/yandex/hooks/test_yandex.py b/tests/providers/yandex/hooks/test_yandex.py
index ebf491cc5f..805138b7b1 100644
--- a/tests/providers/yandex/hooks/test_yandex.py
+++ b/tests/providers/yandex/hooks/test_yandex.py
@@ -22,7 +22,6 @@ from unittest import mock
from unittest.mock import MagicMock, patch
import pytest
-from pytest import param
from airflow.exceptions import AirflowException
from airflow.providers.yandex.hooks.yandex import YandexCloudBaseHook
@@ -144,10 +143,10 @@ class TestYandexHook:
@pytest.mark.parametrize(
"uri",
[
- param(
+ pytest.param(
"a://?extra__yandexcloud__folder_id=abc&extra__yandexcloud__public_ssh_key=abc",
id="prefix"
),
- param("a://?folder_id=abc&public_ssh_key=abc", id="no-prefix"),
+ pytest.param("a://?folder_id=abc&public_ssh_key=abc", id="no-prefix"),
],
)
@patch("airflow.providers.yandex.hooks.yandex.YandexCloudBaseHook._get_credentials",
new=MagicMock())
diff --git a/tests/ti_deps/deps/test_trigger_rule_dep.py b/tests/ti_deps/deps/test_trigger_rule_dep.py
index b1f0cf64fb..229ffe6b1c 100644
--- a/tests/ti_deps/deps/test_trigger_rule_dep.py
+++ b/tests/ti_deps/deps/test_trigger_rule_dep.py
@@ -23,7 +23,6 @@ from unittest import mock
from unittest.mock import Mock
import pytest
-from pytest import param
from airflow.decorators import task, task_group
from airflow.models.baseoperator import BaseOperator
@@ -680,77 +679,77 @@ class TestTriggerRuleDep:
@pytest.mark.parametrize(
"task_cfg, states, exp_reason, exp_state",
[
- param(
+ pytest.param(
dict(work=2, setup=0),
dict(success=2, done=2),
None,
None,
id="no setups",
),
- param(
+ pytest.param(
dict(work=2, setup=1),
dict(success=2, done=2),
"but found 1 task(s) that were not done",
None,
id="setup not done",
),
- param(
+ pytest.param(
dict(work=2, setup=1),
dict(success=2, done=3),
"requires at least one upstream setup task be successful",
UPSTREAM_FAILED,
id="setup failed",
),
- param(
+ pytest.param(
dict(work=2, setup=2),
dict(success=2, done=4, success_setup=1),
None,
None,
id="one setup failed one success",
),
- param(
+ pytest.param(
dict(work=2, setup=2),
dict(success=2, done=3, success_setup=1),
"found 1 task(s) that were not done",
None,
id="one setup success one running",
),
- param(
+ pytest.param(
dict(work=2, setup=1),
dict(success=2, done=3, failed=1),
"requires at least one upstream setup task be successful",
UPSTREAM_FAILED,
id="setup failed",
),
- param(
+ pytest.param(
dict(work=2, setup=2),
dict(success=2, done=4, failed=1, skipped_setup=1),
"requires at least one upstream setup task be successful",
UPSTREAM_FAILED,
id="one setup failed one skipped",
),
- param(
+ pytest.param(
dict(work=2, setup=2),
dict(success=2, done=4, failed=0, skipped_setup=2),
"requires at least one upstream setup task be successful",
SKIPPED,
id="two setups both skipped",
),
- param(
+ pytest.param(
dict(work=2, setup=1),
dict(success=3, done=3, success_setup=1),
None,
None,
id="all success",
),
- param(
+ pytest.param(
dict(work=2, setup=1),
dict(success=1, done=3, success_setup=1),
None,
None,
id="work failed",
),
- param(
+ pytest.param(
dict(work=2, setup=1),
dict(success=2, done=3, skipped_setup=1),
"requires at least one upstream setup task be successful",
diff --git a/tests/utils/test_dates.py b/tests/utils/test_dates.py
index 6013e6b580..b8c7ee2d4e 100644
--- a/tests/utils/test_dates.py
+++ b/tests/utils/test_dates.py
@@ -22,7 +22,6 @@ from datetime import datetime, timedelta
import pendulum
import pytest
from dateutil.relativedelta import relativedelta
-from pytest import approx
from airflow.utils import dates, timezone
@@ -93,16 +92,16 @@ class TestDates:
def test_scale_time_units(self):
# floating point arrays
arr1 = dates.scale_time_units([130, 5400, 10], "minutes")
- assert arr1 == approx([2.1667, 90.0, 0.1667], rel=1e-3)
+ assert arr1 == pytest.approx([2.1667, 90.0, 0.1667], rel=1e-3)
arr2 = dates.scale_time_units([110, 50, 10, 100], "seconds")
- assert arr2 == approx([110.0, 50.0, 10.0, 100.0])
+ assert arr2 == pytest.approx([110.0, 50.0, 10.0, 100.0])
arr3 = dates.scale_time_units([100000, 50000, 10000, 20000], "hours")
- assert arr3 == approx([27.7778, 13.8889, 2.7778, 5.5556], rel=1e-3)
+ assert arr3 == pytest.approx([27.7778, 13.8889, 2.7778, 5.5556], rel=1e-3)
arr4 = dates.scale_time_units([200000, 100000], "days")
- assert arr4 == approx([2.3147, 1.1574], rel=1e-3)
+ assert arr4 == pytest.approx([2.3147, 1.1574], rel=1e-3)
@pytest.mark.filterwarnings("ignore:`airflow.utils.dates.date_range:DeprecationWarning")
diff --git a/tests/utils/test_db_cleanup.py b/tests/utils/test_db_cleanup.py
index 362833131d..af243342b8 100644
--- a/tests/utils/test_db_cleanup.py
+++ b/tests/utils/test_db_cleanup.py
@@ -27,7 +27,6 @@ from uuid import uuid4
import pendulum
import pytest
-from pytest import param
from sqlalchemy import text
from sqlalchemy.exc import OperationalError
from sqlalchemy.ext.declarative import DeclarativeMeta
@@ -72,9 +71,9 @@ class TestDBCleanup:
@pytest.mark.parametrize(
"kwargs, called",
[
- param(dict(confirm=True), True, id="true"),
- param(dict(), True, id="not supplied"),
- param(dict(confirm=False), False, id="false"),
+ pytest.param(dict(confirm=True), True, id="true"),
+ pytest.param(dict(), True, id="not supplied"),
+ pytest.param(dict(confirm=False), False, id="false"),
],
)
@patch("airflow.utils.db_cleanup._cleanup_table", new=MagicMock())
@@ -96,9 +95,9 @@ class TestDBCleanup:
@pytest.mark.parametrize(
"kwargs, should_skip",
[
- param(dict(skip_archive=True), True, id="true"),
- param(dict(), False, id="not supplied"),
- param(dict(skip_archive=False), False, id="false"),
+ pytest.param(dict(skip_archive=True), True, id="true"),
+ pytest.param(dict(), False, id="not supplied"),
+ pytest.param(dict(skip_archive=False), False, id="false"),
],
)
@patch("airflow.utils.db_cleanup._cleanup_table")
@@ -178,12 +177,12 @@ class TestDBCleanup:
@pytest.mark.parametrize(
"table_name, date_add_kwargs, expected_to_delete, external_trigger",
[
- param("task_instance", dict(days=0), 0, False, id="beginning"),
- param("task_instance", dict(days=4), 4, False, id="middle"),
- param("task_instance", dict(days=9), 9, False, id="end_exactly"),
- param("task_instance", dict(days=9, microseconds=1), 10, False, id="beyond_end"),
- param("dag_run", dict(days=9, microseconds=1), 9, False, id="beyond_end_dr"),
- param("dag_run", dict(days=9, microseconds=1), 10, True, id="beyond_end_dr_external"),
+ pytest.param("task_instance", dict(days=0), 0, False, id="beginning"),
+ pytest.param("task_instance", dict(days=4), 4, False, id="middle"),
+ pytest.param("task_instance", dict(days=9), 9, False, id="end_exactly"),
+ pytest.param("task_instance", dict(days=9, microseconds=1), 10, False, id="beyond_end"),
+ pytest.param("dag_run", dict(days=9, microseconds=1), 9, False, id="beyond_end_dr"),
+ pytest.param("dag_run", dict(days=9, microseconds=1), 10, True, id="beyond_end_dr_external"),
],
)
def test__build_query(self, table_name, date_add_kwargs, expected_to_delete, external_trigger):
@@ -220,12 +219,12 @@ class TestDBCleanup:
@pytest.mark.parametrize(
"table_name, date_add_kwargs, expected_to_delete, external_trigger",
[
- param("task_instance", dict(days=0), 0, False, id="beginning"),
- param("task_instance", dict(days=4), 4, False, id="middle"),
- param("task_instance", dict(days=9), 9, False, id="end_exactly"),
- param("task_instance", dict(days=9, microseconds=1), 10, False, id="beyond_end"),
- param("dag_run", dict(days=9, microseconds=1), 9, False, id="beyond_end_dr"),
- param("dag_run", dict(days=9, microseconds=1), 10, True, id="beyond_end_dr_external"),
+ pytest.param("task_instance", dict(days=0), 0, False, id="beginning"),
+ pytest.param("task_instance", dict(days=4), 4, False, id="middle"),
+ pytest.param("task_instance", dict(days=9), 9, False, id="end_exactly"),
+ pytest.param("task_instance", dict(days=9, microseconds=1), 10, False, id="beyond_end"),
+ pytest.param("dag_run", dict(days=9, microseconds=1), 9, False, id="beyond_end_dr"),
+ pytest.param("dag_run", dict(days=9, microseconds=1), 10, True, id="beyond_end_dr_external"),
],
)
def test__cleanup_table(self, table_name, date_add_kwargs, expected_to_delete, external_trigger):
@@ -270,7 +269,7 @@ class TestDBCleanup:
@pytest.mark.parametrize(
"skip_archive, expected_archives",
- [param(True, 0, id="skip_archive"), param(False, 1, id="do_archive")],
+ [pytest.param(True, 0, id="skip_archive"), pytest.param(False, 1, id="do_archive")],
)
def test__skip_archive(self, skip_archive, expected_archives):
"""
diff --git a/tests/utils/test_sqlalchemy.py b/tests/utils/test_sqlalchemy.py
index 76f1d41caa..f444ae56c0 100644
--- a/tests/utils/test_sqlalchemy.py
+++ b/tests/utils/test_sqlalchemy.py
@@ -25,7 +25,6 @@ from unittest.mock import MagicMock
import pytest
from kubernetes.client import models as k8s
-from pytest import param
from sqlalchemy import text
from sqlalchemy.exc import StatementError
@@ -278,19 +277,19 @@ class TestExecutorConfigType:
@pytest.mark.parametrize(
"input",
[
- param(
+ pytest.param(
pickle.dumps("anything"),
id="anything",
),
- param(
+ pytest.param(
pickle.dumps({"pod_override": BaseSerialization.serialize(TEST_POD)}),
id="serialized_pod",
),
- param(
+ pytest.param(
pickle.dumps({"pod_override": TEST_POD}),
id="old_pickled_raw_pod",
),
- param(
+ pytest.param(
pickle.dumps({"pod_override": {"name": "hi"}}),
id="arbitrary_dict",
),
diff --git a/tests/www/views/test_views_connection.py b/tests/www/views/test_views_connection.py
index c7698e3b7c..85b0494158 100644
--- a/tests/www/views/test_views_connection.py
+++ b/tests/www/views/test_views_connection.py
@@ -23,7 +23,6 @@ from unittest import mock
from unittest.mock import PropertyMock
import pytest
-from pytest import param
from airflow.models import Connection
from airflow.utils.session import create_session
@@ -135,9 +134,9 @@ def test_prefill_form_sensitive_fields_extra():
@pytest.mark.parametrize(
"extras, expected",
[
- param({"extra__test__my_param": "this_val"}, "this_val", id="conn_not_upgraded"),
- param({"my_param": "my_val"}, "my_val", id="conn_upgraded"),
- param(
+ pytest.param({"extra__test__my_param": "this_val"}, "this_val", id="conn_not_upgraded"),
+ pytest.param({"my_param": "my_val"}, "my_val", id="conn_upgraded"),
+ pytest.param(
{"extra__test__my_param": "this_val", "my_param": "my_val"},
"my_val",
id="conn_upgraded_old_val_present",