This is an automated email from the ASF dual-hosted git repository.
jscheffl pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/airflow.git
The following commit(s) were added to refs/heads/main by this push:
new 025ac8d888d Add workers.celery.nodeSelector &
workers.kubernetes.nodeSelector (#61957)
025ac8d888d is described below
commit 025ac8d888d94b9d69a4188756be0f8bdea53285
Author: Przemysław Mirowski <[email protected]>
AuthorDate: Thu Mar 12 22:59:59 2026 +0100
Add workers.celery.nodeSelector & workers.kubernetes.nodeSelector (#61957)
* Split some podTemplate & workers test cases
* Add workers.kubernetes.nodeSelector & workers.celery.nodeSelector
---
chart/files/pod-template-file.kubernetes-helm-yaml | 2 +-
chart/values.schema.json | 18 +-
chart/values.yaml | 9 +
.../airflow_aux/test_pod_template_file.py | 291 ++++++++++++++-------
.../tests/helm_tests/airflow_core/test_worker.py | 170 ++++++++----
.../helm_tests/airflow_core/test_worker_sets.py | 7 +
6 files changed, 349 insertions(+), 148 deletions(-)
diff --git a/chart/files/pod-template-file.kubernetes-helm-yaml
b/chart/files/pod-template-file.kubernetes-helm-yaml
index bad917ae2bf..8a93d8f7f10 100644
--- a/chart/files/pod-template-file.kubernetes-helm-yaml
+++ b/chart/files/pod-template-file.kubernetes-helm-yaml
@@ -17,7 +17,7 @@
under the License.
*/}}
---
-{{- $nodeSelector := or .Values.workers.nodeSelector .Values.nodeSelector }}
+{{- $nodeSelector := or .Values.workers.kubernetes.nodeSelector
.Values.workers.nodeSelector .Values.nodeSelector }}
{{- $affinity := or .Values.workers.affinity .Values.affinity }}
{{- $tolerations := or .Values.workers.tolerations .Values.tolerations }}
{{- $topologySpreadConstraints := or .Values.workers.topologySpreadConstraints
.Values.topologySpreadConstraints }}
diff --git a/chart/values.schema.json b/chart/values.schema.json
index f4186e6d784..31e6333e885 100644
--- a/chart/values.schema.json
+++ b/chart/values.schema.json
@@ -2332,7 +2332,7 @@
}
},
"nodeSelector": {
- "description": "Select certain nodes for Airflow Celery
worker pods and pods created with pod-template-file.",
+ "description": "Select certain nodes for Airflow Celery
worker pods and pods created with pod-template-file. Use
``workers.celery.nodeSelector`` and/or ``workers.kubernetes.nodeSelector`` to
separate values between Celery workers and pod-template-file.",
"type": "object",
"default": {},
"additionalProperties": {
@@ -3283,6 +3283,14 @@
"null"
],
"default": null
+ },
+ "nodeSelector": {
+ "description": "Select certain nodes for Airflow
Celery worker pods.",
+ "type": "object",
+ "default": {},
+ "additionalProperties": {
+ "type": "string"
+ }
}
}
},
@@ -3564,6 +3572,14 @@
"null"
],
"default": null
+ },
+ "nodeSelector": {
+ "description": "Select certain nodes for pods
created with pod-template-file.",
+ "type": "object",
+ "default": {},
+ "additionalProperties": {
+ "type": "string"
+ }
}
}
}
diff --git a/chart/values.yaml b/chart/values.yaml
index 94a6e005608..a5159c84faa 100644
--- a/chart/values.yaml
+++ b/chart/values.yaml
@@ -947,7 +947,10 @@ workers:
extraPorts: []
# Select certain nodes for Airflow Celery worker pods and pods created with
pod-template-file
+ # Use workers.celery.nodeSelector and/or workers.kubernetes.nodeSelector to
separate values
+ # between Celery workers and pod-template-file
nodeSelector: {}
+
runtimeClassName: ~
priorityClassName: ~
affinity: {}
@@ -1267,6 +1270,9 @@ workers:
# Grace period for tasks to finish after SIGTERM is sent from kubernetes
terminationGracePeriodSeconds: ~
+ # Select certain nodes for Airflow Celery worker pods
+ nodeSelector: {}
+
kubernetes:
# Command to use in pod-template-file (templated)
command: ~
@@ -1334,6 +1340,9 @@ workers:
# Grace period for tasks to finish after SIGTERM is sent from kubernetes
terminationGracePeriodSeconds: ~
+ # Select certain nodes for pods created with pod-template-file
+ nodeSelector: {}
+
# Airflow scheduler settings
scheduler:
enabled: true
diff --git a/helm-tests/tests/helm_tests/airflow_aux/test_pod_template_file.py
b/helm-tests/tests/helm_tests/airflow_aux/test_pod_template_file.py
index 15324d6cb51..fcf8c193fb8 100644
--- a/helm-tests/tests/helm_tests/airflow_aux/test_pod_template_file.py
+++ b/helm-tests/tests/helm_tests/airflow_aux/test_pod_template_file.py
@@ -375,7 +375,7 @@ class TestPodTemplateFile:
"readOnly": True,
} in jmespath.search("spec.containers[0].volumeMounts", docs[0])
- def test_should_use_global_affinity_tolerations_and_node_selector(self):
+ def test_global_affinity(self):
docs = render_chart(
values={
"executor": "KubernetesExecutor",
@@ -392,43 +392,77 @@ class TestPodTemplateFile:
}
}
},
+ },
+ show_only=["templates/pod-template-file.yaml"],
+ chart_dir=self.temp_chart_dir,
+ )
+
+ assert jmespath.search("kind", docs[0]) == "Pod"
+ assert jmespath.search("spec.affinity", docs[0]) == {
+ "nodeAffinity": {
+ "requiredDuringSchedulingIgnoredDuringExecution": {
+ "nodeSelectorTerms": [
+ {
+ "matchExpressions": [
+ {"key": "foo", "operator": "In", "values":
["true"]},
+ ]
+ }
+ ]
+ }
+ }
+ }
+
+ def test_global_tolerations(self):
+ docs = render_chart(
+ values={
+ "executor": "KubernetesExecutor",
"tolerations": [
{"key": "dynamic-pods", "operator": "Equal", "value":
"true", "effect": "NoSchedule"}
],
+ },
+ show_only=["templates/pod-template-file.yaml"],
+ chart_dir=self.temp_chart_dir,
+ )
+
+ assert jmespath.search("kind", docs[0]) == "Pod"
+ assert jmespath.search("spec.tolerations", docs[0]) == [
+ {"key": "dynamic-pods", "operator": "Equal", "value": "true",
"effect": "NoSchedule"}
+ ]
+
+ def test_global_topology_spread_constraints(self):
+ expected_topology_spread_constraints = [
+ {
+ "maxSkew": 1,
+ "topologyKey": "foo",
+ "whenUnsatisfiable": "ScheduleAnyway",
+ "labelSelector": {"matchLabels": {"tier": "airflow"}},
+ }
+ ]
+ docs = render_chart(
+ values={"topologySpreadConstraints":
expected_topology_spread_constraints},
+ show_only=["templates/pod-template-file.yaml"],
+ chart_dir=self.temp_chart_dir,
+ )
+
+ assert jmespath.search("kind", docs[0]) == "Pod"
+ assert expected_topology_spread_constraints == jmespath.search(
+ "spec.topologySpreadConstraints", docs[0]
+ )
+
+ def test_global_node_selector(self):
+ docs = render_chart(
+ values={
+ "executor": "KubernetesExecutor",
"nodeSelector": {"diskType": "ssd"},
},
show_only=["templates/pod-template-file.yaml"],
chart_dir=self.temp_chart_dir,
)
- assert re.search("Pod", docs[0]["kind"])
- assert (
- jmespath.search(
- "spec.affinity.nodeAffinity."
- "requiredDuringSchedulingIgnoredDuringExecution."
- "nodeSelectorTerms[0]."
- "matchExpressions[0]."
- "key",
- docs[0],
- )
- == "foo"
- )
- assert (
- jmespath.search(
- "spec.nodeSelector.diskType",
- docs[0],
- )
- == "ssd"
- )
- assert (
- jmespath.search(
- "spec.tolerations[0].key",
- docs[0],
- )
- == "dynamic-pods"
- )
-
- def
test_should_create_valid_affinity_tolerations_topology_spread_constraints_and_node_selector(self):
+ assert jmespath.search("kind", docs[0]) == "Pod"
+ assert jmespath.search("spec.nodeSelector", docs[0]) == {"diskType":
"ssd"}
+
+ def test_workers_affinity(self):
docs = render_chart(
values={
"executor": "KubernetesExecutor",
@@ -446,9 +480,51 @@ class TestPodTemplateFile:
}
}
},
+ },
+ },
+ show_only=["templates/pod-template-file.yaml"],
+ chart_dir=self.temp_chart_dir,
+ )
+
+ assert jmespath.search("kind", docs[0]) == "Pod"
+ assert jmespath.search("spec.affinity", docs[0]) == {
+ "nodeAffinity": {
+ "requiredDuringSchedulingIgnoredDuringExecution": {
+ "nodeSelectorTerms": [
+ {
+ "matchExpressions": [
+ {"key": "foo", "operator": "In", "values":
["true"]},
+ ]
+ }
+ ]
+ }
+ }
+ }
+
+ def test_workers_tolerations(self):
+ docs = render_chart(
+ values={
+ "executor": "KubernetesExecutor",
+ "workers": {
"tolerations": [
{"key": "dynamic-pods", "operator": "Equal", "value":
"true", "effect": "NoSchedule"}
],
+ },
+ },
+ show_only=["templates/pod-template-file.yaml"],
+ chart_dir=self.temp_chart_dir,
+ )
+
+ assert jmespath.search("kind", docs[0]) == "Pod"
+ assert jmespath.search("spec.tolerations", docs[0]) == [
+ {"key": "dynamic-pods", "operator": "Equal", "value": "true",
"effect": "NoSchedule"}
+ ]
+
+ def test_workers_topology_spread_constraints(self):
+ docs = render_chart(
+ values={
+ "executor": "KubernetesExecutor",
+ "workers": {
"topologySpreadConstraints": [
{
"maxSkew": 1,
@@ -457,7 +533,6 @@ class TestPodTemplateFile:
"labelSelector": {"matchLabels": {"tier":
"airflow"}},
}
],
- "nodeSelector": {"diskType": "ssd"},
},
},
show_only=["templates/pod-template-file.yaml"],
@@ -465,41 +540,37 @@ class TestPodTemplateFile:
)
assert jmespath.search("kind", docs[0]) == "Pod"
- assert (
- jmespath.search(
- "spec.affinity.nodeAffinity."
- "requiredDuringSchedulingIgnoredDuringExecution."
- "nodeSelectorTerms[0]."
- "matchExpressions[0]."
- "key",
- docs[0],
- )
- == "foo"
- )
- assert (
- jmespath.search(
- "spec.nodeSelector.diskType",
- docs[0],
- )
- == "ssd"
- )
- assert (
- jmespath.search(
- "spec.tolerations[0].key",
- docs[0],
- )
- == "dynamic-pods"
- )
- assert (
- jmespath.search(
- "spec.topologySpreadConstraints[0].topologyKey",
- docs[0],
- )
- == "foo"
- )
-
- def
test_affinity_tolerations_topology_spread_constraints_and_node_selector_precedence(self):
- """When given both global and worker affinity etc, worker affinity etc
is used."""
+ assert jmespath.search("spec.topologySpreadConstraints", docs[0]) == [
+ {
+ "maxSkew": 1,
+ "topologyKey": "foo",
+ "whenUnsatisfiable": "ScheduleAnyway",
+ "labelSelector": {"matchLabels": {"tier": "airflow"}},
+ }
+ ]
+
+ @pytest.mark.parametrize(
+ "workers_values",
+ [
+ {"nodeSelector": {"diskType": "ssd"}},
+ {"kubernetes": {"nodeSelector": {"diskType": "ssd"}}},
+ {"nodeSelector": {"ssd": "diskType"}, "kubernetes":
{"nodeSelector": {"diskType": "ssd"}}},
+ ],
+ )
+ def test_workers_node_selector(self, workers_values):
+ docs = render_chart(
+ values={
+ "executor": "KubernetesExecutor",
+ "workers": workers_values,
+ },
+ show_only=["templates/pod-template-file.yaml"],
+ chart_dir=self.temp_chart_dir,
+ )
+
+ assert jmespath.search("kind", docs[0]) == "Pod"
+ assert jmespath.search("spec.nodeSelector", docs[0]) == {"diskType":
"ssd"}
+
+ def test_affinity_overwrite(self):
expected_affinity = {
"nodeAffinity": {
"requiredDuringSchedulingIgnoredDuringExecution": {
@@ -513,22 +584,9 @@ class TestPodTemplateFile:
}
}
}
- expected_topology_spread_constraints = {
- "maxSkew": 1,
- "topologyKey": "foo",
- "whenUnsatisfiable": "ScheduleAnyway",
- "labelSelector": {"matchLabels": {"tier": "airflow"}},
- }
docs = render_chart(
values={
- "workers": {
- "affinity": expected_affinity,
- "tolerations": [
- {"key": "dynamic-pods", "operator": "Equal", "value":
"true", "effect": "NoSchedule"}
- ],
- "topologySpreadConstraints":
[expected_topology_spread_constraints],
- "nodeSelector": {"type": "ssd"},
- },
+ "workers": {"affinity": expected_affinity},
"affinity": {
"nodeAffinity": {
"preferredDuringSchedulingIgnoredDuringExecution": [
@@ -543,9 +601,47 @@ class TestPodTemplateFile:
]
}
},
+ },
+ show_only=["templates/pod-template-file.yaml"],
+ chart_dir=self.temp_chart_dir,
+ )
+
+ assert jmespath.search("kind", docs[0]) == "Pod"
+ assert expected_affinity == jmespath.search("spec.affinity", docs[0])
+
+ def test_tolerations_overwrite(self):
+ docs = render_chart(
+ values={
+ "workers": {
+ "tolerations": [
+ {"key": "dynamic-pods", "operator": "Equal", "value":
"true", "effect": "NoSchedule"}
+ ],
+ },
"tolerations": [
{"key": "not-me", "operator": "Equal", "value": "true",
"effect": "NoSchedule"}
],
+ },
+ show_only=["templates/pod-template-file.yaml"],
+ chart_dir=self.temp_chart_dir,
+ )
+
+ assert jmespath.search("kind", docs[0]) == "Pod"
+ assert jmespath.search("spec.tolerations", docs[0]) == [
+ {"key": "dynamic-pods", "operator": "Equal", "value": "true",
"effect": "NoSchedule"}
+ ]
+
+ def test_topology_spread_constraints_overwrite(self):
+ expected_topology_spread_constraints = {
+ "maxSkew": 1,
+ "topologyKey": "foo",
+ "whenUnsatisfiable": "ScheduleAnyway",
+ "labelSelector": {"matchLabels": {"tier": "airflow"}},
+ }
+ docs = render_chart(
+ values={
+ "workers": {
+ "topologySpreadConstraints":
[expected_topology_spread_constraints],
+ },
"topologySpreadConstraints": [
{
"maxSkew": 1,
@@ -554,27 +650,36 @@ class TestPodTemplateFile:
"labelSelector": {"matchLabels": {"tier": "airflow"}},
}
],
- "nodeSelector": {"type": "not-me"},
},
show_only=["templates/pod-template-file.yaml"],
chart_dir=self.temp_chart_dir,
)
- assert expected_affinity == jmespath.search("spec.affinity", docs[0])
- assert (
- jmespath.search(
- "spec.nodeSelector.type",
- docs[0],
- )
- == "ssd"
- )
- tolerations = jmespath.search("spec.tolerations", docs[0])
- assert len(tolerations) == 1
- assert tolerations[0]["key"] == "dynamic-pods"
- assert expected_topology_spread_constraints == jmespath.search(
- "spec.topologySpreadConstraints[0]", docs[0]
+ assert jmespath.search("kind", docs[0]) == "Pod"
+ assert [expected_topology_spread_constraints] == jmespath.search(
+ "spec.topologySpreadConstraints", docs[0]
+ )
+
+ @pytest.mark.parametrize(
+ "workers_values",
+ [
+ {"nodeSelector": {"diskType": "ssd"}},
+ {"kubernetes": {"nodeSelector": {"diskType": "ssd"}}},
+ ],
+ )
+ def test_node_selector_overwrite(self, workers_values):
+ docs = render_chart(
+ values={
+ "workers": workers_values,
+ "nodeSelector": {"type": "not-me"},
+ },
+ show_only=["templates/pod-template-file.yaml"],
+ chart_dir=self.temp_chart_dir,
)
+ assert jmespath.search("kind", docs[0]) == "Pod"
+ assert jmespath.search("spec.nodeSelector", docs[0]) == {"diskType":
"ssd"}
+
@pytest.mark.parametrize(
("base_scheduler_name", "worker_scheduler_name", "expected"),
[
diff --git a/helm-tests/tests/helm_tests/airflow_core/test_worker.py
b/helm-tests/tests/helm_tests/airflow_core/test_worker.py
index 325695d9631..73ccb6b0efa 100644
--- a/helm-tests/tests/helm_tests/airflow_core/test_worker.py
+++ b/helm-tests/tests/helm_tests/airflow_core/test_worker.py
@@ -493,7 +493,7 @@ class TestWorker:
assert expected_strategy == jmespath.search("spec.strategy", docs[0])
- def test_should_create_valid_affinity_tolerations_and_node_selector(self):
+ def test_affinity(self):
docs = render_chart(
values={
"executor": "CeleryExecutor",
@@ -511,44 +511,83 @@ class TestWorker:
}
}
},
+ },
+ },
+ show_only=["templates/workers/worker-deployment.yaml"],
+ )
+
+ assert jmespath.search("kind", docs[0]) == "StatefulSet"
+ assert jmespath.search("spec.template.spec.affinity", docs[0]) == {
+ "nodeAffinity": {
+ "requiredDuringSchedulingIgnoredDuringExecution": {
+ "nodeSelectorTerms": [
+ {
+ "matchExpressions": [
+ {"key": "foo", "operator": "In", "values":
["true"]},
+ ]
+ }
+ ]
+ }
+ }
+ }
+
+ def test_tolerations(self):
+ docs = render_chart(
+ values={
+ "executor": "CeleryExecutor",
+ "workers": {
"tolerations": [
{"key": "dynamic-pods", "operator": "Equal", "value":
"true", "effect": "NoSchedule"}
],
- "nodeSelector": {"diskType": "ssd"},
},
},
show_only=["templates/workers/worker-deployment.yaml"],
)
assert jmespath.search("kind", docs[0]) == "StatefulSet"
- assert (
- jmespath.search(
- "spec.template.spec.affinity.nodeAffinity."
- "requiredDuringSchedulingIgnoredDuringExecution."
- "nodeSelectorTerms[0]."
- "matchExpressions[0]."
- "key",
- docs[0],
- )
- == "foo"
+ assert jmespath.search("spec.template.spec.tolerations", docs[0]) == [
+ {"key": "dynamic-pods", "operator": "Equal", "value": "true",
"effect": "NoSchedule"}
+ ]
+
+ def test_topology_spread_constraints(self):
+ expected_topology_spread_constraints = [
+ {
+ "maxSkew": 1,
+ "topologyKey": "foo",
+ "whenUnsatisfiable": "ScheduleAnyway",
+ "labelSelector": {"matchLabels": {"tier": "airflow"}},
+ }
+ ]
+ docs = render_chart(
+ values={"workers": {"topologySpreadConstraints":
expected_topology_spread_constraints}},
+ show_only=["templates/workers/worker-deployment.yaml"],
)
- assert (
- jmespath.search(
- "spec.template.spec.nodeSelector.diskType",
- docs[0],
- )
- == "ssd"
+
+ assert expected_topology_spread_constraints == jmespath.search(
+ "spec.template.spec.topologySpreadConstraints", docs[0]
)
- assert (
- jmespath.search(
- "spec.template.spec.tolerations[0].key",
- docs[0],
- )
- == "dynamic-pods"
+
+ @pytest.mark.parametrize(
+ "workers_values",
+ [
+ {"nodeSelector": {"diskType": "ssd"}},
+ {"celery": {"nodeSelector": {"diskType": "ssd"}}},
+ {"nodeSelector": {"ssd": "diskType"}, "celery": {"nodeSelector":
{"diskType": "ssd"}}},
+ ],
+ )
+ def test_node_selector(self, workers_values):
+ docs = render_chart(
+ values={
+ "executor": "CeleryExecutor",
+ "workers": workers_values,
+ },
+ show_only=["templates/workers/worker-deployment.yaml"],
)
- def
test_affinity_tolerations_topology_spread_constraints_and_node_selector_precedence(self):
- """When given both global and worker affinity etc, worker affinity etc
is used."""
+ assert jmespath.search("kind", docs[0]) == "StatefulSet"
+ assert jmespath.search("spec.template.spec.nodeSelector", docs[0]) ==
{"diskType": "ssd"}
+
+ def test_affinity_overwrite(self):
expected_affinity = {
"nodeAffinity": {
"requiredDuringSchedulingIgnoredDuringExecution": {
@@ -562,22 +601,9 @@ class TestWorker:
}
}
}
- expected_topology_spread_constraints = {
- "maxSkew": 1,
- "topologyKey": "foo",
- "whenUnsatisfiable": "ScheduleAnyway",
- "labelSelector": {"matchLabels": {"tier": "airflow"}},
- }
docs = render_chart(
values={
- "workers": {
- "affinity": expected_affinity,
- "tolerations": [
- {"key": "dynamic-pods", "operator": "Equal", "value":
"true", "effect": "NoSchedule"}
- ],
- "topologySpreadConstraints":
[expected_topology_spread_constraints],
- "nodeSelector": {"type": "ssd"},
- },
+ "workers": {"affinity": expected_affinity},
"affinity": {
"nodeAffinity": {
"preferredDuringSchedulingIgnoredDuringExecution": [
@@ -592,9 +618,41 @@ class TestWorker:
]
}
},
+ },
+ show_only=["templates/workers/worker-deployment.yaml"],
+ )
+
+ assert expected_affinity ==
jmespath.search("spec.template.spec.affinity", docs[0])
+
+ def test_tolerations_overwrite(self):
+ docs = render_chart(
+ values={
+ "workers": {
+ "tolerations": [
+ {"key": "dynamic-pods", "operator": "Equal", "value":
"true", "effect": "NoSchedule"}
+ ],
+ },
"tolerations": [
{"key": "not-me", "operator": "Equal", "value": "true",
"effect": "NoSchedule"}
],
+ },
+ show_only=["templates/workers/worker-deployment.yaml"],
+ )
+
+ assert jmespath.search("spec.template.spec.tolerations", docs[0]) == [
+ {"key": "dynamic-pods", "operator": "Equal", "value": "true",
"effect": "NoSchedule"}
+ ]
+
+ def test_topology_spread_constraints_overwrite(self):
+ expected_topology_spread_constraints = {
+ "maxSkew": 1,
+ "topologyKey": "foo",
+ "whenUnsatisfiable": "ScheduleAnyway",
+ "labelSelector": {"matchLabels": {"tier": "airflow"}},
+ }
+ docs = render_chart(
+ values={
+ "workers": {"topologySpreadConstraints":
[expected_topology_spread_constraints]},
"topologySpreadConstraints": [
{
"maxSkew": 1,
@@ -603,26 +661,32 @@ class TestWorker:
"labelSelector": {"matchLabels": {"tier": "airflow"}},
}
],
- "nodeSelector": {"type": "not-me"},
},
show_only=["templates/workers/worker-deployment.yaml"],
)
- assert expected_affinity ==
jmespath.search("spec.template.spec.affinity", docs[0])
- assert (
- jmespath.search(
- "spec.template.spec.nodeSelector.type",
- docs[0],
- )
- == "ssd"
+ assert [expected_topology_spread_constraints] == jmespath.search(
+ "spec.template.spec.topologySpreadConstraints", docs[0]
)
- tolerations = jmespath.search("spec.template.spec.tolerations",
docs[0])
- assert len(tolerations) == 1
- assert tolerations[0]["key"] == "dynamic-pods"
- assert expected_topology_spread_constraints == jmespath.search(
- "spec.template.spec.topologySpreadConstraints[0]", docs[0]
+
+ @pytest.mark.parametrize(
+ "workers_values",
+ [
+ {"nodeSelector": {"diskType": "ssd"}},
+ {"celery": {"nodeSelector": {"diskType": "ssd"}}},
+ ],
+ )
+ def test_node_selector_overwrite(self, workers_values):
+ docs = render_chart(
+ values={
+ "workers": workers_values,
+ "nodeSelector": {"type": "not-me"},
+ },
+ show_only=["templates/workers/worker-deployment.yaml"],
)
+ assert jmespath.search("spec.template.spec.nodeSelector", docs[0]) ==
{"diskType": "ssd"}
+
@pytest.mark.parametrize(
("base_scheduler_name", "worker_scheduler_name", "expected"),
[
diff --git a/helm-tests/tests/helm_tests/airflow_core/test_worker_sets.py
b/helm-tests/tests/helm_tests/airflow_core/test_worker_sets.py
index 58107647491..69c8234bd99 100644
--- a/helm-tests/tests/helm_tests/airflow_core/test_worker_sets.py
+++ b/helm-tests/tests/helm_tests/airflow_core/test_worker_sets.py
@@ -2449,6 +2449,13 @@ class TestWorkerSets:
"sets": [{"name": "set1", "nodeSelector": {"name":
"test-node"}}],
},
},
+ {
+ "celery": {
+ "nodeSelector": {"test": "name"},
+ "enableDefault": False,
+ "sets": [{"name": "set1", "nodeSelector": {"name":
"test-node"}}],
+ },
+ },
],
)
def test_overwrite_node_selector(self, workers_values):