This is an automated email from the ASF dual-hosted git repository.

gurwls223 pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/spark.git


The following commit(s) were added to refs/heads/master by this push:
     new 434aa7a50ebb [SPARK-55612][PYTHON][TESTS] Add 
test_dataframe_query_context to modules
434aa7a50ebb is described below

commit 434aa7a50ebb97b44d50d0ebf3775850bd1b4670
Author: Tian Gao <[email protected]>
AuthorDate: Fri Feb 20 13:26:11 2026 +0900

    [SPARK-55612][PYTHON][TESTS] Add test_dataframe_query_context to modules
    
    ### What changes were proposed in this pull request?
    
    Add test_dataframe_query_context and test_parity_dataframe_query_context to 
modules.py and fix the failure.
    
    ### Why are the changes needed?
    
    The tests were not added, so CI won't run them. They also currently fail
because of #50604.
    
    ### Does this PR introduce _any_ user-facing change?
    
    No.
    
    ### How was this patch tested?
    
    Locally passed.
    
    ### Was this patch authored or co-authored using generative AI tooling?
    
    No.
    
    Closes #54384 from gaogaotiantian/add-dataframe-query-context.
    
    Authored-by: Tian Gao <[email protected]>
    Signed-off-by: Hyukjin Kwon <[email protected]>
---
 dev/sparktestsupport/modules.py                    |  2 ++
 .../sql/tests/test_dataframe_query_context.py      | 22 ++++++++++++++++++++++
 2 files changed, 24 insertions(+)

diff --git a/dev/sparktestsupport/modules.py b/dev/sparktestsupport/modules.py
index c89afa2e5131..d62bf4414ffd 100644
--- a/dev/sparktestsupport/modules.py
+++ b/dev/sparktestsupport/modules.py
@@ -558,6 +558,7 @@ pyspark_sql = Module(
         "pyspark.sql.tests.test_collection",
         "pyspark.sql.tests.test_creation",
         "pyspark.sql.tests.test_conversion",
+        "pyspark.sql.tests.test_dataframe_query_context",
         "pyspark.sql.tests.test_listener",
         "pyspark.sql.tests.test_observation",
         "pyspark.sql.tests.test_repartition",
@@ -1165,6 +1166,7 @@ pyspark_connect = Module(
         "pyspark.sql.tests.connect.test_parity_sql",
         "pyspark.sql.tests.connect.test_parity_job_cancellation",
         "pyspark.sql.tests.connect.test_parity_dataframe",
+        "pyspark.sql.tests.connect.test_parity_dataframe_query_context",
         "pyspark.sql.tests.connect.test_parity_collection",
         "pyspark.sql.tests.connect.test_parity_creation",
         "pyspark.sql.tests.connect.test_parity_observation",
diff --git a/python/pyspark/sql/tests/test_dataframe_query_context.py 
b/python/pyspark/sql/tests/test_dataframe_query_context.py
index ea64a77b3db4..d57dddf47af9 100644
--- a/python/pyspark/sql/tests/test_dataframe_query_context.py
+++ b/python/pyspark/sql/tests/test_dataframe_query_context.py
@@ -51,6 +51,7 @@ class DataFrameQueryContextTestsMixin:
                 exception=pe.exception,
                 errorClass="CAST_INVALID_INPUT",
                 messageParameters={
+                    "ansiConfig": '"spark.sql.ansi.enabled"',
                     "expression": "'string'",
                     "sourceType": '"STRING"',
                     "targetType": '"BIGINT"',
@@ -66,6 +67,7 @@ class DataFrameQueryContextTestsMixin:
                 exception=pe.exception,
                 errorClass="CAST_INVALID_INPUT",
                 messageParameters={
+                    "ansiConfig": '"spark.sql.ansi.enabled"',
                     "expression": "'string'",
                     "sourceType": '"STRING"',
                     "targetType": '"BIGINT"',
@@ -81,6 +83,7 @@ class DataFrameQueryContextTestsMixin:
                 exception=pe.exception,
                 errorClass="CAST_INVALID_INPUT",
                 messageParameters={
+                    "ansiConfig": '"spark.sql.ansi.enabled"',
                     "expression": "'string'",
                     "sourceType": '"STRING"',
                     "targetType": '"BIGINT"',
@@ -96,6 +99,7 @@ class DataFrameQueryContextTestsMixin:
                 exception=pe.exception,
                 errorClass="CAST_INVALID_INPUT",
                 messageParameters={
+                    "ansiConfig": '"spark.sql.ansi.enabled"',
                     "expression": "'string'",
                     "sourceType": '"STRING"',
                     "targetType": '"BIGINT"',
@@ -111,6 +115,7 @@ class DataFrameQueryContextTestsMixin:
                 exception=pe.exception,
                 errorClass="CAST_INVALID_INPUT",
                 messageParameters={
+                    "ansiConfig": '"spark.sql.ansi.enabled"',
                     "expression": "'string'",
                     "sourceType": '"STRING"',
                     "targetType": '"BIGINT"',
@@ -126,6 +131,7 @@ class DataFrameQueryContextTestsMixin:
                 exception=pe.exception,
                 errorClass="CAST_INVALID_INPUT",
                 messageParameters={
+                    "ansiConfig": '"spark.sql.ansi.enabled"',
                     "expression": "'string'",
                     "sourceType": '"STRING"',
                     "targetType": '"BIGINT"',
@@ -141,6 +147,7 @@ class DataFrameQueryContextTestsMixin:
                 exception=pe.exception,
                 errorClass="CAST_INVALID_INPUT",
                 messageParameters={
+                    "ansiConfig": '"spark.sql.ansi.enabled"',
                     "expression": "'string'",
                     "sourceType": '"STRING"',
                     "targetType": '"BIGINT"',
@@ -156,6 +163,7 @@ class DataFrameQueryContextTestsMixin:
                 exception=pe.exception,
                 errorClass="CAST_INVALID_INPUT",
                 messageParameters={
+                    "ansiConfig": '"spark.sql.ansi.enabled"',
                     "expression": "'string'",
                     "sourceType": '"STRING"',
                     "targetType": '"BIGINT"',
@@ -171,6 +179,7 @@ class DataFrameQueryContextTestsMixin:
                 exception=pe.exception,
                 errorClass="CAST_INVALID_INPUT",
                 messageParameters={
+                    "ansiConfig": '"spark.sql.ansi.enabled"',
                     "expression": "'string'",
                     "sourceType": '"STRING"',
                     "targetType": '"BIGINT"',
@@ -186,6 +195,7 @@ class DataFrameQueryContextTestsMixin:
                 exception=pe.exception,
                 errorClass="CAST_INVALID_INPUT",
                 messageParameters={
+                    "ansiConfig": '"spark.sql.ansi.enabled"',
                     "expression": "'string'",
                     "sourceType": '"STRING"',
                     "targetType": '"BIGINT"',
@@ -201,6 +211,7 @@ class DataFrameQueryContextTestsMixin:
                 exception=pe.exception,
                 errorClass="CAST_INVALID_INPUT",
                 messageParameters={
+                    "ansiConfig": '"spark.sql.ansi.enabled"',
                     "expression": "'string'",
                     "sourceType": '"STRING"',
                     "targetType": '"BIGINT"',
@@ -216,6 +227,7 @@ class DataFrameQueryContextTestsMixin:
                 exception=pe.exception,
                 errorClass="CAST_INVALID_INPUT",
                 messageParameters={
+                    "ansiConfig": '"spark.sql.ansi.enabled"',
                     "expression": "'string'",
                     "sourceType": '"STRING"',
                     "targetType": '"BIGINT"',
@@ -231,6 +243,7 @@ class DataFrameQueryContextTestsMixin:
                 exception=pe.exception,
                 errorClass="CAST_INVALID_INPUT",
                 messageParameters={
+                    "ansiConfig": '"spark.sql.ansi.enabled"',
                     "expression": "'string'",
                     "sourceType": '"STRING"',
                     "targetType": '"BIGINT"',
@@ -263,6 +276,7 @@ class DataFrameQueryContextTestsMixin:
                 exception=pe.exception,
                 errorClass="CAST_INVALID_INPUT",
                 messageParameters={
+                    "ansiConfig": '"spark.sql.ansi.enabled"',
                     "expression": "'string'",
                     "sourceType": '"STRING"',
                     "targetType": '"BIGINT"',
@@ -282,6 +296,7 @@ class DataFrameQueryContextTestsMixin:
                 exception=pe.exception,
                 errorClass="CAST_INVALID_INPUT",
                 messageParameters={
+                    "ansiConfig": '"spark.sql.ansi.enabled"',
                     "expression": "'string'",
                     "sourceType": '"STRING"',
                     "targetType": '"BIGINT"',
@@ -299,6 +314,7 @@ class DataFrameQueryContextTestsMixin:
                 exception=pe.exception,
                 errorClass="CAST_INVALID_INPUT",
                 messageParameters={
+                    "ansiConfig": '"spark.sql.ansi.enabled"',
                     "expression": "'string'",
                     "sourceType": '"STRING"',
                     "targetType": '"BIGINT"',
@@ -325,6 +341,7 @@ class DataFrameQueryContextTestsMixin:
                 exception=pe.exception,
                 errorClass="CAST_INVALID_INPUT",
                 messageParameters={
+                    "ansiConfig": '"spark.sql.ansi.enabled"',
                     "expression": "'string'",
                     "sourceType": '"STRING"',
                     "targetType": '"BIGINT"',
@@ -340,6 +357,7 @@ class DataFrameQueryContextTestsMixin:
                 exception=pe.exception,
                 errorClass="CAST_INVALID_INPUT",
                 messageParameters={
+                    "ansiConfig": '"spark.sql.ansi.enabled"',
                     "expression": "'string'",
                     "sourceType": '"STRING"',
                     "targetType": '"BIGINT"',
@@ -355,6 +373,7 @@ class DataFrameQueryContextTestsMixin:
                 exception=pe.exception,
                 errorClass="CAST_INVALID_INPUT",
                 messageParameters={
+                    "ansiConfig": '"spark.sql.ansi.enabled"',
                     "expression": "'string'",
                     "sourceType": '"STRING"',
                     "targetType": '"BIGINT"',
@@ -385,6 +404,7 @@ class DataFrameQueryContextTestsMixin:
                 exception=pe.exception,
                 errorClass="CAST_INVALID_INPUT",
                 messageParameters={
+                    "ansiConfig": '"spark.sql.ansi.enabled"',
                     "expression": "'string'",
                     "sourceType": '"STRING"',
                     "targetType": '"BIGINT"',
@@ -402,6 +422,7 @@ class DataFrameQueryContextTestsMixin:
                 exception=pe.exception,
                 errorClass="CAST_INVALID_INPUT",
                 messageParameters={
+                    "ansiConfig": '"spark.sql.ansi.enabled"',
                     "expression": "'string'",
                     "sourceType": '"STRING"',
                     "targetType": '"BIGINT"',
@@ -419,6 +440,7 @@ class DataFrameQueryContextTestsMixin:
                 exception=pe.exception,
                 errorClass="CAST_INVALID_INPUT",
                 messageParameters={
+                    "ansiConfig": '"spark.sql.ansi.enabled"',
                     "expression": "'string'",
                     "sourceType": '"STRING"',
                     "targetType": '"BIGINT"',


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to