This is an automated email from the ASF dual-hosted git repository.
ruifengz pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/spark.git
The following commit(s) were added to refs/heads/master by this push:
new 6f0900ec0230 [SPARK-55061][PYTHON][TESTS] Make module missing message consistently string
6f0900ec0230 is described below
commit 6f0900ec0230ccd18fb79c4305364814d4fe1af6
Author: Tian Gao <[email protected]>
AuthorDate: Fri Jan 16 19:34:44 2026 +0800
[SPARK-55061][PYTHON][TESTS] Make module missing message consistently string
### What changes were proposed in this pull request?
Instead of using `None` for the `module_requirement_message`-style variables, use `""`.
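For illustration, a minimal self-contained sketch of the new pattern, using `importlib.util.find_spec` as a stand-in for the `have_package` helper in `pyspark/testing/utils.py` (the stand-in is an assumption for the sketch, not the real helper; the `numpy` case mirrors the diff below):

```python
from importlib.util import find_spec

# Stand-in for pyspark.testing.utils.have_package (assumed equivalent here).
have_numpy = find_spec("numpy") is not None

# Before this patch the message was Optional[str]:
#   numpy_requirement_message = None if have_numpy else "No module named 'numpy'"
# After: always a str, where "" means the requirement is satisfied.
numpy_requirement_message = "" if have_numpy else "No module named 'numpy'"

# Truthiness replaces the old `is None` checks.
have_numpy_flag = not numpy_requirement_message
```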
### Why are the changes needed?
We had many unnecessary `cast` calls and `type: ignore` comments just because this
message could be `None`, while `unittest.skipIf()` expects a string. `""` is a
string and also falsy, so the existing `message1 or message2 or message3`
fallback logic still works.
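As a quick sanity check of that claim, a minimal sketch (the message values are hypothetical; only `unittest.skipIf` is the real API):

```python
import unittest

# Hypothetical values: "" means the dependency is present.
pandas_requirement_message = ""
pyarrow_requirement_message = "No module named 'pyarrow'"

# `or` returns the first truthy operand, so the first non-empty message wins;
# if every message is "" the result is "" (still a str, never None).
reason = pandas_requirement_message or pyarrow_requirement_message
assert isinstance(reason, str)

class ExampleTests(unittest.TestCase):
    # skipIf(condition, reason) annotates reason as str, so "" type-checks
    # without cast() or "type: ignore"; the condition alone controls skipping.
    @unittest.skipIf(bool(reason), reason)
    def test_needs_optional_deps(self):
        self.assertTrue(True)

if __name__ == "__main__":
    unittest.main()
```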
### Does this PR introduce _any_ user-facing change?
No.
### How was this patch tested?
CI
### Was this patch authored or co-authored using generative AI tooling?
Yes, cursor (claude-4.5-opus-high)
Closes #53826 from gaogaotiantian/change-message-type.
Authored-by: Tian Gao <[email protected]>
Signed-off-by: Ruifeng Zheng <[email protected]>
---
python/pyspark/resource/tests/test_resources.py | 4 +-
python/pyspark/sql/tests/arrow/test_arrow.py | 5 +--
.../sql/tests/arrow/test_arrow_cogrouped_map.py | 2 +-
.../sql/tests/arrow/test_arrow_grouped_map.py | 2 +-
.../pandas/streaming/test_transform_with_state.py | 4 +-
.../test_transform_with_state_state_variable.py | 4 +-
python/pyspark/sql/tests/pandas/test_converter.py | 4 +-
.../sql/tests/pandas/test_pandas_cogrouped_map.py | 4 +-
.../sql/tests/pandas/test_pandas_grouped_map.py | 4 +-
.../pandas/test_pandas_grouped_map_with_state.py | 3 +-
python/pyspark/sql/tests/pandas/test_pandas_map.py | 3 +-
.../sql/tests/pandas/test_pandas_sqlmetrics.py | 4 +-
python/pyspark/sql/tests/pandas/test_pandas_udf.py | 3 +-
.../tests/pandas/test_pandas_udf_grouped_agg.py | 4 +-
.../sql/tests/pandas/test_pandas_udf_scalar.py | 5 +--
.../sql/tests/pandas/test_pandas_udf_typehints.py | 4 +-
...pandas_udf_typehints_with_future_annotations.py | 4 +-
.../sql/tests/pandas/test_pandas_udf_window.py | 3 +-
python/pyspark/sql/tests/test_collection.py | 14 +++----
python/pyspark/sql/tests/test_creation.py | 10 ++---
python/pyspark/sql/tests/test_dataframe.py | 3 +-
python/pyspark/sql/tests/test_group.py | 12 +++---
python/pyspark/sql/tests/test_listener.py | 4 +-
python/pyspark/sql/tests/test_udf.py | 6 +--
python/pyspark/sql/tests/test_udf_profiler.py | 24 +++++------
python/pyspark/testing/sqlutils.py | 4 +-
python/pyspark/testing/streamingutils.py | 4 +-
python/pyspark/testing/utils.py | 48 +++++++++++-----------
python/pyspark/tests/test_memory_profiler.py | 26 ++++++------
29 files changed, 99 insertions(+), 122 deletions(-)
diff --git a/python/pyspark/resource/tests/test_resources.py b/python/pyspark/resource/tests/test_resources.py
index 084d566de19b..7b151a40dded 100644
--- a/python/pyspark/resource/tests/test_resources.py
+++ b/python/pyspark/resource/tests/test_resources.py
@@ -15,8 +15,6 @@
# limitations under the License.
#
import unittest
-from typing import cast
-
from pyspark.resource import ExecutorResourceRequests, ResourceProfileBuilder, TaskResourceRequests
from pyspark.sql import SparkSession
from pyspark.testing.sqlutils import (
@@ -80,7 +78,7 @@ class ResourceProfileTests(unittest.TestCase):
@unittest.skipIf(
not have_pandas or not have_pyarrow,
- cast(str, pandas_requirement_message or pyarrow_requirement_message),
+ pandas_requirement_message or pyarrow_requirement_message,
)
def test_profile_before_sc_for_sql(self):
rpb = ResourceProfileBuilder()
diff --git a/python/pyspark/sql/tests/arrow/test_arrow.py b/python/pyspark/sql/tests/arrow/test_arrow.py
index a29408980fab..922fbef96215 100644
--- a/python/pyspark/sql/tests/arrow/test_arrow.py
+++ b/python/pyspark/sql/tests/arrow/test_arrow.py
@@ -21,7 +21,6 @@ import threading
import calendar
import time
import unittest
-from typing import cast
from collections import namedtuple
from pyspark import SparkConf
@@ -1856,7 +1855,7 @@ class ArrowTestsMixin:
@unittest.skipIf(
not have_pandas or not have_pyarrow,
- cast(str, pandas_requirement_message or pyarrow_requirement_message),
+ pandas_requirement_message or pyarrow_requirement_message,
)
class ArrowTests(ArrowTestsMixin, ReusedSQLTestCase):
pass
@@ -1864,7 +1863,7 @@ class ArrowTests(ArrowTestsMixin, ReusedSQLTestCase):
@unittest.skipIf(
not have_pandas or not have_pyarrow,
- cast(str, pandas_requirement_message or pyarrow_requirement_message),
+ pandas_requirement_message or pyarrow_requirement_message,
)
class MaxResultArrowTests(unittest.TestCase):
# These tests are separate as 'spark.driver.maxResultSize' configuration
diff --git a/python/pyspark/sql/tests/arrow/test_arrow_cogrouped_map.py b/python/pyspark/sql/tests/arrow/test_arrow_cogrouped_map.py
index 02bbe34d6c0f..98362a44d3eb 100644
--- a/python/pyspark/sql/tests/arrow/test_arrow_cogrouped_map.py
+++ b/python/pyspark/sql/tests/arrow/test_arrow_cogrouped_map.py
@@ -37,7 +37,7 @@ if have_pyarrow:
@unittest.skipIf(
not have_pyarrow,
- pyarrow_requirement_message, # type: ignore[arg-type]
+ pyarrow_requirement_message,
)
class CogroupedMapInArrowTestsMixin:
@property
diff --git a/python/pyspark/sql/tests/arrow/test_arrow_grouped_map.py b/python/pyspark/sql/tests/arrow/test_arrow_grouped_map.py
index a8d6327744b4..9c1b14676ecc 100644
--- a/python/pyspark/sql/tests/arrow/test_arrow_grouped_map.py
+++ b/python/pyspark/sql/tests/arrow/test_arrow_grouped_map.py
@@ -59,7 +59,7 @@ def function_variations(func):
@unittest.skipIf(
not have_pyarrow,
- pyarrow_requirement_message, # type: ignore[arg-type]
+ pyarrow_requirement_message,
)
class ApplyInArrowTestsMixin:
@property
diff --git a/python/pyspark/sql/tests/pandas/streaming/test_transform_with_state.py b/python/pyspark/sql/tests/pandas/streaming/test_transform_with_state.py
index 75ce69f402ae..ac96c6d6b83a 100644
--- a/python/pyspark/sql/tests/pandas/streaming/test_transform_with_state.py
+++ b/python/pyspark/sql/tests/pandas/streaming/test_transform_with_state.py
@@ -17,8 +17,6 @@
import os
import unittest
-from typing import cast
-
from pyspark import SparkConf
from pyspark.testing.sqlutils import (
have_pyarrow,
@@ -32,7 +30,7 @@ from pyspark.sql.tests.pandas.streaming.test_pandas_transform_with_state import
@unittest.skipIf(
not have_pyarrow or os.environ.get("PYTHON_GIL", "?") == "0",
- cast(str, pyarrow_requirement_message or "Not supported in no-GIL mode"),
+ pyarrow_requirement_message or "Not supported in no-GIL mode",
)
class TransformWithStateInPySparkTestsMixin(TransformWithStateTestsMixin):
@classmethod
diff --git a/python/pyspark/sql/tests/pandas/streaming/test_transform_with_state_state_variable.py b/python/pyspark/sql/tests/pandas/streaming/test_transform_with_state_state_variable.py
index aae86783b771..437f11dcb714 100644
--- a/python/pyspark/sql/tests/pandas/streaming/test_transform_with_state_state_variable.py
+++ b/python/pyspark/sql/tests/pandas/streaming/test_transform_with_state_state_variable.py
@@ -17,8 +17,6 @@
import os
import unittest
-from typing import cast
-
from pyspark import SparkConf
from pyspark.testing.sqlutils import (
have_pyarrow,
@@ -33,7 +31,7 @@ from pyspark.sql.tests.pandas.streaming.test_pandas_transform_with_state_state_v
@unittest.skipIf(
not have_pyarrow or os.environ.get("PYTHON_GIL", "?") == "0",
- cast(str, pyarrow_requirement_message or "Not supported in no-GIL mode"),
+ pyarrow_requirement_message or "Not supported in no-GIL mode",
)
class TransformWithStateInPySparkStateVariableTestsMixin(TransformWithStateStateVariableTestsMixin):
@classmethod
diff --git a/python/pyspark/sql/tests/pandas/test_converter.py b/python/pyspark/sql/tests/pandas/test_converter.py
index 05d2488e66b5..30828b1ccb3b 100644
--- a/python/pyspark/sql/tests/pandas/test_converter.py
+++ b/python/pyspark/sql/tests/pandas/test_converter.py
@@ -16,8 +16,6 @@
#
import unittest
-from typing import cast
-
from pyspark.sql.types import (
ArrayType,
IntegerType,
@@ -46,7 +44,7 @@ if have_pyarrow:
@unittest.skipIf(
not have_pandas or not have_pyarrow,
- cast(str, pandas_requirement_message or pyarrow_requirement_message),
+ pandas_requirement_message or pyarrow_requirement_message,
)
class ConverterTests(unittest.TestCase):
def test_converter_to_pandas_array(self):
diff --git a/python/pyspark/sql/tests/pandas/test_pandas_cogrouped_map.py b/python/pyspark/sql/tests/pandas/test_pandas_cogrouped_map.py
index 6aac48117920..ae9d55833265 100644
--- a/python/pyspark/sql/tests/pandas/test_pandas_cogrouped_map.py
+++ b/python/pyspark/sql/tests/pandas/test_pandas_cogrouped_map.py
@@ -17,8 +17,6 @@
import unittest
import logging
-from typing import cast
-
from pyspark.sql import functions as sf
from pyspark.sql.functions import pandas_udf, udf
from pyspark.sql.types import (
@@ -52,7 +50,7 @@ if have_pyarrow:
@unittest.skipIf(
not have_pandas or not have_pyarrow,
- cast(str, pandas_requirement_message or pyarrow_requirement_message),
+ pandas_requirement_message or pyarrow_requirement_message,
)
class CogroupedApplyInPandasTestsMixin:
@property
diff --git a/python/pyspark/sql/tests/pandas/test_pandas_grouped_map.py b/python/pyspark/sql/tests/pandas/test_pandas_grouped_map.py
index a8eac55de472..3f7fa24d826d 100644
--- a/python/pyspark/sql/tests/pandas/test_pandas_grouped_map.py
+++ b/python/pyspark/sql/tests/pandas/test_pandas_grouped_map.py
@@ -22,7 +22,7 @@ import os
from collections import OrderedDict
from decimal import Decimal
-from typing import cast, Iterator, Tuple, Any
+from typing import Iterator, Tuple, Any
from pyspark.sql import Row, functions as sf
from pyspark.sql.functions import udf, pandas_udf, PandasUDFType
@@ -62,7 +62,7 @@ if have_pyarrow and have_pandas:
@unittest.skipIf(
not have_pandas or not have_pyarrow,
- cast(str, pandas_requirement_message or pyarrow_requirement_message),
+ pandas_requirement_message or pyarrow_requirement_message,
)
class ApplyInPandasTestsMixin:
@property
diff --git a/python/pyspark/sql/tests/pandas/test_pandas_grouped_map_with_state.py b/python/pyspark/sql/tests/pandas/test_pandas_grouped_map_with_state.py
index a0cf0712ef10..dddc210e0ddd 100644
--- a/python/pyspark/sql/tests/pandas/test_pandas_grouped_map_with_state.py
+++ b/python/pyspark/sql/tests/pandas/test_pandas_grouped_map_with_state.py
@@ -22,7 +22,6 @@ import sys
import tempfile
import unittest
-from typing import cast
from decimal import Decimal
from pyspark.sql.streaming.state import GroupStateTimeout, GroupState
@@ -52,7 +51,7 @@ if have_pyarrow:
@unittest.skipIf(
not have_pandas or not have_pyarrow,
- cast(str, pandas_requirement_message or pyarrow_requirement_message),
+ pandas_requirement_message or pyarrow_requirement_message,
)
class GroupedApplyInPandasWithStateTestsMixin:
@classmethod
diff --git a/python/pyspark/sql/tests/pandas/test_pandas_map.py b/python/pyspark/sql/tests/pandas/test_pandas_map.py
index 536fdf4c5b4e..634acf95fd9b 100644
--- a/python/pyspark/sql/tests/pandas/test_pandas_map.py
+++ b/python/pyspark/sql/tests/pandas/test_pandas_map.py
@@ -20,7 +20,6 @@ import tempfile
import time
import unittest
import logging
-from typing import cast
from pyspark.sql import Row
from pyspark.sql.functions import col, encode, lit
@@ -43,7 +42,7 @@ if have_pandas:
@unittest.skipIf(
not have_pandas or not have_pyarrow,
- cast(str, pandas_requirement_message or pyarrow_requirement_message),
+ pandas_requirement_message or pyarrow_requirement_message,
)
class MapInPandasTestsMixin:
spark: SparkSession
diff --git a/python/pyspark/sql/tests/pandas/test_pandas_sqlmetrics.py b/python/pyspark/sql/tests/pandas/test_pandas_sqlmetrics.py
index 7277f6090b25..fc1fa70fe6c2 100644
--- a/python/pyspark/sql/tests/pandas/test_pandas_sqlmetrics.py
+++ b/python/pyspark/sql/tests/pandas/test_pandas_sqlmetrics.py
@@ -16,8 +16,6 @@
#
import unittest
-from typing import cast
-
from pyspark.sql.functions import pandas_udf
from pyspark.testing.sqlutils import (
ReusedSQLTestCase,
@@ -30,7 +28,7 @@ from pyspark.testing.sqlutils import (
@unittest.skipIf(
not have_pandas or not have_pyarrow,
- cast(str, pandas_requirement_message or pyarrow_requirement_message),
+ pandas_requirement_message or pyarrow_requirement_message,
)
class PandasSQLMetrics(ReusedSQLTestCase):
def test_pandas_sql_metrics_basic(self):
diff --git a/python/pyspark/sql/tests/pandas/test_pandas_udf.py b/python/pyspark/sql/tests/pandas/test_pandas_udf.py
index ee5e51a1c4b6..eec961218ce7 100644
--- a/python/pyspark/sql/tests/pandas/test_pandas_udf.py
+++ b/python/pyspark/sql/tests/pandas/test_pandas_udf.py
@@ -17,7 +17,6 @@
import unittest
import datetime
-from typing import cast
from pyspark.sql.functions import udf, pandas_udf, PandasUDFType, assert_true, lit
from pyspark.sql.types import (
@@ -41,7 +40,7 @@ from pyspark.testing.sqlutils import (
@unittest.skipIf(
not have_pandas or not have_pyarrow,
- cast(str, pandas_requirement_message or pyarrow_requirement_message),
+ pandas_requirement_message or pyarrow_requirement_message,
)
class PandasUDFTestsMixin:
def test_pandas_udf_basic(self):
diff --git a/python/pyspark/sql/tests/pandas/test_pandas_udf_grouped_agg.py b/python/pyspark/sql/tests/pandas/test_pandas_udf_grouped_agg.py
index 3bc6ad75dbdd..46e7b430bc02 100644
--- a/python/pyspark/sql/tests/pandas/test_pandas_udf_grouped_agg.py
+++ b/python/pyspark/sql/tests/pandas/test_pandas_udf_grouped_agg.py
@@ -17,7 +17,7 @@
import unittest
import logging
-from typing import cast, Iterator, Tuple
+from typing import Iterator, Tuple
from pyspark.util import PythonEvalType, is_remote_only
from pyspark.sql import Row, functions as sf
@@ -50,7 +50,7 @@ if have_pandas:
@unittest.skipIf(
not have_pandas or not have_pyarrow,
- cast(str, pandas_requirement_message or pyarrow_requirement_message),
+ pandas_requirement_message or pyarrow_requirement_message,
)
class GroupedAggPandasUDFTestsMixin:
@property
diff --git a/python/pyspark/sql/tests/pandas/test_pandas_udf_scalar.py b/python/pyspark/sql/tests/pandas/test_pandas_udf_scalar.py
index 032fc21ed63f..a9656bfce4a6 100644
--- a/python/pyspark/sql/tests/pandas/test_pandas_udf_scalar.py
+++ b/python/pyspark/sql/tests/pandas/test_pandas_udf_scalar.py
@@ -23,7 +23,6 @@ import unittest
import logging
from datetime import date, datetime
from decimal import Decimal
-from typing import cast
from pyspark import TaskContext
from pyspark.util import PythonEvalType, is_remote_only
@@ -82,7 +81,7 @@ if have_pyarrow:
@unittest.skipIf(
not have_pandas or not have_pyarrow,
- cast(str, pandas_requirement_message or pyarrow_requirement_message),
+ pandas_requirement_message or pyarrow_requirement_message,
)
class ScalarPandasUDFTestsMixin:
@property
@@ -1725,7 +1724,7 @@ class ScalarPandasUDFTestsMixin:
self.assertEqual(expected, df1.collect())
# SPARK-24721
- @unittest.skipIf(not test_compiled, test_not_compiled_message) # type: ignore
+ @unittest.skipIf(not test_compiled, test_not_compiled_message)
def test_datasource_with_udf(self):
# Same as SQLTests.test_datasource_with_udf, but with Pandas UDF
# This needs to a separate test because Arrow dependency is optional
diff --git a/python/pyspark/sql/tests/pandas/test_pandas_udf_typehints.py b/python/pyspark/sql/tests/pandas/test_pandas_udf_typehints.py
index 5f8335b39e35..d368bff06629 100644
--- a/python/pyspark/sql/tests/pandas/test_pandas_udf_typehints.py
+++ b/python/pyspark/sql/tests/pandas/test_pandas_udf_typehints.py
@@ -16,7 +16,7 @@
#
import unittest
from inspect import signature
-from typing import Union, Iterator, Tuple, cast, get_type_hints
+from typing import Union, Iterator, Tuple, get_type_hints
from pyspark.sql.functions import mean, lit
from pyspark.testing.sqlutils import (
@@ -39,7 +39,7 @@ if have_pandas:
@unittest.skipIf(
not have_pandas or not have_pyarrow,
- cast(str, pandas_requirement_message or pyarrow_requirement_message),
+ pandas_requirement_message or pyarrow_requirement_message,
)
class PandasUDFTypeHintsTests(ReusedSQLTestCase):
def test_type_annotation_scalar(self):
diff --git a/python/pyspark/sql/tests/pandas/test_pandas_udf_typehints_with_future_annotations.py b/python/pyspark/sql/tests/pandas/test_pandas_udf_typehints_with_future_annotations.py
index c2d51bc580a1..ecf3c91f1f91 100644
--- a/python/pyspark/sql/tests/pandas/test_pandas_udf_typehints_with_future_annotations.py
+++ b/python/pyspark/sql/tests/pandas/test_pandas_udf_typehints_with_future_annotations.py
@@ -18,7 +18,7 @@ from __future__ import annotations
import unittest
from inspect import signature
-from typing import Union, Iterator, Tuple, cast, get_type_hints
+from typing import Union, Iterator, Tuple, get_type_hints
from pyspark.sql.functions import mean, lit
from pyspark.testing.sqlutils import (
@@ -40,7 +40,7 @@ if have_pandas:
@unittest.skipIf(
not have_pandas or not have_pyarrow,
- cast(str, pandas_requirement_message or pyarrow_requirement_message),
+ pandas_requirement_message or pyarrow_requirement_message,
)
class PandasUDFTypeHintsWithFutureAnnotationsTests(ReusedSQLTestCase):
def test_type_annotation_scalar(self):
diff --git a/python/pyspark/sql/tests/pandas/test_pandas_udf_window.py b/python/pyspark/sql/tests/pandas/test_pandas_udf_window.py
index 6bc2925db821..27d1f3dc21c6 100644
--- a/python/pyspark/sql/tests/pandas/test_pandas_udf_window.py
+++ b/python/pyspark/sql/tests/pandas/test_pandas_udf_window.py
@@ -17,7 +17,6 @@
import unittest
import logging
-from typing import cast
from decimal import Decimal
from pyspark.errors import AnalysisException, PythonException
@@ -48,7 +47,7 @@ if have_pandas:
@unittest.skipIf(
not have_pandas or not have_pyarrow,
- cast(str, pandas_requirement_message or pyarrow_requirement_message),
+ pandas_requirement_message or pyarrow_requirement_message,
)
class WindowPandasUDFTestsMixin:
@property
diff --git a/python/pyspark/sql/tests/test_collection.py b/python/pyspark/sql/tests/test_collection.py
index 3556abeb7139..fb04d59989db 100644
--- a/python/pyspark/sql/tests/test_collection.py
+++ b/python/pyspark/sql/tests/test_collection.py
@@ -93,7 +93,7 @@ class DataFrameCollectionTestsMixin:
df = self.spark.createDataFrame(data, schema)
return df.toPandas()
- @unittest.skipIf(not have_pandas, pandas_requirement_message) # type: ignore
+ @unittest.skipIf(not have_pandas, pandas_requirement_message)
def test_to_pandas(self):
import numpy as np
@@ -108,7 +108,7 @@ class DataFrameCollectionTestsMixin:
self.assertEqual(types[6], "datetime64[ns]")
self.assertEqual(types[7], "timedelta64[ns]")
- @unittest.skipIf(not have_pandas, pandas_requirement_message) # type: ignore
+ @unittest.skipIf(not have_pandas, pandas_requirement_message)
def test_to_pandas_with_duplicated_column_names(self):
for arrow_enabled in [False, True]:
with self.sql_conf({"spark.sql.execution.arrow.pyspark.enabled":
arrow_enabled}):
@@ -124,7 +124,7 @@ class DataFrameCollectionTestsMixin:
self.assertEqual(types.iloc[0], np.int32)
self.assertEqual(types.iloc[1], np.int32)
- @unittest.skipIf(not have_pandas, pandas_requirement_message) # type: ignore
+ @unittest.skipIf(not have_pandas, pandas_requirement_message)
def test_to_pandas_on_cross_join(self):
for arrow_enabled in [False, True]:
with self.sql_conf({"spark.sql.execution.arrow.pyspark.enabled":
arrow_enabled}):
@@ -153,7 +153,7 @@ class DataFrameCollectionTestsMixin:
with self.assertRaisesRegex(ImportError, "Pandas >= .* must be installed"):
self._to_pandas()
- @unittest.skipIf(not have_pandas, pandas_requirement_message) # type: ignore
+ @unittest.skipIf(not have_pandas, pandas_requirement_message)
def test_to_pandas_avoid_astype(self):
import numpy as np
@@ -165,7 +165,7 @@ class DataFrameCollectionTestsMixin:
self.assertEqual(types[1], object)
self.assertEqual(types[2], np.float64)
- @unittest.skipIf(not have_pandas, pandas_requirement_message) # type: ignore
+ @unittest.skipIf(not have_pandas, pandas_requirement_message)
def test_to_pandas_from_empty_dataframe(self):
is_arrow_enabled = [True, False]
for value in is_arrow_enabled:
@@ -195,7 +195,7 @@ class DataFrameCollectionTestsMixin:
dtypes_when_empty_df = self.spark.sql(sql).filter("False").toPandas().dtypes
self.assertTrue(np.all(dtypes_when_empty_df == dtypes_when_nonempty_df))
- @unittest.skipIf(not have_pandas, pandas_requirement_message) # type: ignore
+ @unittest.skipIf(not have_pandas, pandas_requirement_message)
def test_to_pandas_from_null_dataframe(self):
is_arrow_enabled = [True, False]
for value in is_arrow_enabled:
@@ -235,7 +235,7 @@ class DataFrameCollectionTestsMixin:
self.assertTrue(np.can_cast(np.datetime64, types[9]))
self.assertTrue(np.can_cast(np.timedelta64, types[10]))
- @unittest.skipIf(not have_pandas, pandas_requirement_message) # type: ignore
+ @unittest.skipIf(not have_pandas, pandas_requirement_message)
def test_to_pandas_from_mixed_dataframe(self):
is_arrow_enabled = [True, False]
for value in is_arrow_enabled:
diff --git a/python/pyspark/sql/tests/test_creation.py b/python/pyspark/sql/tests/test_creation.py
index c289abfc1f3c..439bb60dbea3 100644
--- a/python/pyspark/sql/tests/test_creation.py
+++ b/python/pyspark/sql/tests/test_creation.py
@@ -19,8 +19,6 @@ from decimal import Decimal
import os
import time
import unittest
-from typing import cast
-
from pyspark.sql import Row
import pyspark.sql.functions as F
from pyspark.sql.types import (
@@ -84,7 +82,7 @@ class DataFrameCreationTestsMixin:
self.assertIsInstance(df.schema["t"].dataType, TimeType)
self.assertEqual(df.count(), 3)
- @unittest.skipIf(not have_pandas, pandas_requirement_message) # type: ignore
+ @unittest.skipIf(not have_pandas, pandas_requirement_message)
def test_create_dataframe_from_pandas_with_timestamp(self):
import pandas as pd
from datetime import datetime
@@ -120,7 +118,7 @@ class DataFrameCreationTestsMixin:
self.spark.createDataFrame(pdf)
# Regression test for SPARK-23360
- @unittest.skipIf(not have_pandas, pandas_requirement_message) # type: ignore
+ @unittest.skipIf(not have_pandas, pandas_requirement_message)
def test_create_dataframe_from_pandas_with_dst(self):
import pandas as pd
from pandas.testing import assert_frame_equal
@@ -145,7 +143,7 @@ class DataFrameCreationTestsMixin:
os.environ["TZ"] = orig_env_tz
time.tzset()
- @unittest.skipIf(not have_pandas, pandas_requirement_message) # type: ignore
+ @unittest.skipIf(not have_pandas, pandas_requirement_message)
def test_create_dataframe_from_pandas_with_day_time_interval(self):
# SPARK-37277: Test DayTimeIntervalType in createDataFrame without Arrow.
import pandas as pd
import pandas as pd
@@ -203,7 +201,7 @@ class DataFrameCreationTestsMixin:
@unittest.skipIf(
not have_pandas or not have_pyarrow,
- cast(str, pandas_requirement_message or pyarrow_requirement_message),
+ pandas_requirement_message or pyarrow_requirement_message,
)
def test_schema_inference_from_pandas_with_dict(self):
# SPARK-47543: test for verifying if inferring `dict` as `MapType` work properly.
diff --git a/python/pyspark/sql/tests/test_dataframe.py b/python/pyspark/sql/tests/test_dataframe.py
index f84067dec449..7cc3cba77ce8 100644
--- a/python/pyspark/sql/tests/test_dataframe.py
+++ b/python/pyspark/sql/tests/test_dataframe.py
@@ -22,7 +22,6 @@ import shutil
import tempfile
import warnings
import unittest
-from typing import cast
import io
from contextlib import redirect_stdout
@@ -935,7 +934,7 @@ class DataFrameTestsMixin:
@unittest.skipIf(
not have_pandas or not have_pyarrow,
- cast(str, pandas_requirement_message or pyarrow_requirement_message),
+ pandas_requirement_message or pyarrow_requirement_message,
)
def test_pandas_api(self):
import pandas as pd
diff --git a/python/pyspark/sql/tests/test_group.py b/python/pyspark/sql/tests/test_group.py
index f48271f8518c..9d67fe2a3024 100644
--- a/python/pyspark/sql/tests/test_group.py
+++ b/python/pyspark/sql/tests/test_group.py
@@ -30,8 +30,8 @@ from pyspark.testing import assertDataFrameEqual
class GroupTestsMixin:
- @unittest.skipIf(not have_pandas, pandas_requirement_message) # type: ignore
- @unittest.skipIf(not have_pyarrow, pyarrow_requirement_message) # type: ignore
+ @unittest.skipIf(not have_pandas, pandas_requirement_message)
+ @unittest.skipIf(not have_pyarrow, pyarrow_requirement_message)
def test_agg_func(self):
data = [Row(key=1, value=10), Row(key=1, value=20), Row(key=1, value=30)]
df = self.spark.createDataFrame(data)
@@ -70,8 +70,8 @@ class GroupTestsMixin:
# test deprecated countDistinct
self.assertEqual(100, g.agg(functions.countDistinct(df.value)).first()[0])
- @unittest.skipIf(not have_pandas, pandas_requirement_message) # type: ignore
- @unittest.skipIf(not have_pyarrow, pyarrow_requirement_message) # type: ignore
+ @unittest.skipIf(not have_pandas, pandas_requirement_message)
+ @unittest.skipIf(not have_pyarrow, pyarrow_requirement_message)
def test_group_by_ordinal(self):
spark = self.spark
df = spark.createDataFrame(
@@ -142,8 +142,8 @@ class GroupTestsMixin:
self.assertEqual([["a", 3], ["b", 5]], [list(r) for r in res])
- @unittest.skipIf(not have_pandas, pandas_requirement_message) # type: ignore
- @unittest.skipIf(not have_pyarrow, pyarrow_requirement_message) # type: ignore
+ @unittest.skipIf(not have_pandas, pandas_requirement_message)
+ @unittest.skipIf(not have_pyarrow, pyarrow_requirement_message)
def test_order_by_ordinal(self):
spark = self.spark
df = spark.createDataFrame(
diff --git a/python/pyspark/sql/tests/test_listener.py b/python/pyspark/sql/tests/test_listener.py
index 94bd569b7de8..05480808c74d 100644
--- a/python/pyspark/sql/tests/test_listener.py
+++ b/python/pyspark/sql/tests/test_listener.py
@@ -17,8 +17,6 @@
import os
import unittest
-from typing import cast
-
from pyspark.sql import SparkSession
from pyspark.testing.sqlutils import (
SQLTestUtils,
@@ -89,7 +87,7 @@ class QueryExecutionListenerTests(
@unittest.skipIf(
not have_pandas or not have_pyarrow,
- cast(str, pandas_requirement_message or pyarrow_requirement_message),
+ pandas_requirement_message or pyarrow_requirement_message,
)
def test_query_execution_listener_on_collect_with_arrow(self):
with self.sql_conf({"spark.sql.execution.arrow.pyspark.enabled":
True}):
diff --git a/python/pyspark/sql/tests/test_udf.py b/python/pyspark/sql/tests/test_udf.py
index 4913733368e0..96c3a64e26d2 100644
--- a/python/pyspark/sql/tests/test_udf.py
+++ b/python/pyspark/sql/tests/test_udf.py
@@ -561,7 +561,7 @@ class BaseUDFTestsMixin(object):
df.select(add_four("id").alias("plus_four")).collect(),
)
- @unittest.skipIf(not test_compiled, test_not_compiled_message) # type: ignore
+ @unittest.skipIf(not test_compiled, test_not_compiled_message)
def test_register_java_function(self):
with self.temp_func("javaStringLength", "javaStringLength2",
"javaStringLength3"):
self.spark.udf.registerJavaFunction(
@@ -582,7 +582,7 @@ class BaseUDFTestsMixin(object):
[value] = self.spark.sql("SELECT
javaStringLength3('test')").first()
self.assertEqual(value, 4)
- @unittest.skipIf(not test_compiled, test_not_compiled_message) # type: ignore
+ @unittest.skipIf(not test_compiled, test_not_compiled_message)
def test_register_java_udaf(self):
with self.temp_func("javaUDAF"):
self.spark.udf.registerJavaUDAF("javaUDAF",
"test.org.apache.spark.sql.MyDoubleAvg")
@@ -831,7 +831,7 @@ class BaseUDFTestsMixin(object):
self.assertEqual(rows, [Row(_1=1, _2=2, a="const_str")])
# SPARK-24721
- @unittest.skipIf(not test_compiled, test_not_compiled_message) # type: ignore
+ @unittest.skipIf(not test_compiled, test_not_compiled_message)
def test_datasource_with_udf(self):
from pyspark.sql.functions import lit, col
diff --git a/python/pyspark/sql/tests/test_udf_profiler.py b/python/pyspark/sql/tests/test_udf_profiler.py
index 41a5897aae01..4389559c40d8 100644
--- a/python/pyspark/sql/tests/test_udf_profiler.py
+++ b/python/pyspark/sql/tests/test_udf_profiler.py
@@ -23,7 +23,7 @@ import os
import sys
import warnings
from io import StringIO
-from typing import Iterator, cast
+from typing import Iterator
from pyspark import SparkConf
from pyspark.errors import PySparkValueError
@@ -261,7 +261,7 @@ class UDFProfiler2TestsMixin:
@unittest.skipIf(
not have_pandas or not have_pyarrow,
- cast(str, pandas_requirement_message or pyarrow_requirement_message),
+ pandas_requirement_message or pyarrow_requirement_message,
)
def test_perf_profiler_udf_with_arrow(self):
with self.sql_conf({"spark.sql.pyspark.udf.profiler": "perf"}):
@@ -303,7 +303,7 @@ class UDFProfiler2TestsMixin:
@unittest.skipIf(
not have_pandas or not have_pyarrow,
- cast(str, pandas_requirement_message or pyarrow_requirement_message),
+ pandas_requirement_message or pyarrow_requirement_message,
)
def test_perf_profiler_pandas_udf(self):
@pandas_udf("long")
@@ -350,7 +350,7 @@ class UDFProfiler2TestsMixin:
@unittest.skipIf(
not have_pandas or not have_pyarrow,
- cast(str, pandas_requirement_message or pyarrow_requirement_message),
+ pandas_requirement_message or pyarrow_requirement_message,
)
def test_perf_profiler_pandas_udf_iterator(self):
import pandas as pd
@@ -389,7 +389,7 @@ class UDFProfiler2TestsMixin:
@unittest.skipIf(
not have_pandas or not have_pyarrow,
- cast(str, pandas_requirement_message or pyarrow_requirement_message),
+ pandas_requirement_message or pyarrow_requirement_message,
)
def test_perf_profiler_map_in_pandas(self):
df = self.spark.createDataFrame([(1, 21), (2, 30)], ("id",
"age")).repartition(1)
@@ -426,7 +426,7 @@ class UDFProfiler2TestsMixin:
@unittest.skipIf(
not have_pandas or not have_pyarrow,
- cast(str, pandas_requirement_message or pyarrow_requirement_message),
+ pandas_requirement_message or pyarrow_requirement_message,
)
def test_perf_profiler_pandas_udf_window(self):
# WindowInPandasExec
@@ -472,7 +472,7 @@ class UDFProfiler2TestsMixin:
@unittest.skipIf(
not have_pandas or not have_pyarrow,
- cast(str, pandas_requirement_message or pyarrow_requirement_message),
+ pandas_requirement_message or pyarrow_requirement_message,
)
def test_perf_profiler_aggregate_in_pandas(self):
# AggregateInPandasExec
@@ -539,7 +539,7 @@ class UDFProfiler2TestsMixin:
@unittest.skipIf(
not have_pandas or not have_pyarrow,
- cast(str, pandas_requirement_message or pyarrow_requirement_message),
+ pandas_requirement_message or pyarrow_requirement_message,
)
def test_perf_profiler_pandas_udf_grouped_agg_iter(self):
import pandas as pd
@@ -566,7 +566,7 @@ class UDFProfiler2TestsMixin:
@unittest.skipIf(
not have_pandas or not have_pyarrow,
- cast(str, pandas_requirement_message or pyarrow_requirement_message),
+ pandas_requirement_message or pyarrow_requirement_message,
)
def test_perf_profiler_group_apply_in_pandas(self):
# FlatMapGroupsInBatchExec
@@ -588,7 +588,7 @@ class UDFProfiler2TestsMixin:
@unittest.skipIf(
not have_pandas or not have_pyarrow,
- cast(str, pandas_requirement_message or pyarrow_requirement_message),
+ pandas_requirement_message or pyarrow_requirement_message,
)
def test_perf_profiler_cogroup_apply_in_pandas(self):
# FlatMapCoGroupsInBatchExec
@@ -617,7 +617,7 @@ class UDFProfiler2TestsMixin:
@unittest.skipIf(
not have_pandas or not have_pyarrow,
- cast(str, pandas_requirement_message or pyarrow_requirement_message),
+ pandas_requirement_message or pyarrow_requirement_message,
)
def test_perf_profiler_group_apply_in_arrow(self):
# FlatMapGroupsInBatchExec
@@ -642,7 +642,7 @@ class UDFProfiler2TestsMixin:
@unittest.skipIf(
not have_pandas or not have_pyarrow,
- cast(str, pandas_requirement_message or pyarrow_requirement_message),
+ pandas_requirement_message or pyarrow_requirement_message,
)
def test_perf_profiler_cogroup_apply_in_arrow(self):
import pyarrow as pa
diff --git a/python/pyspark/testing/sqlutils.py b/python/pyspark/testing/sqlutils.py
index 2431f19add3b..00b3a1ef504d 100644
--- a/python/pyspark/testing/sqlutils.py
+++ b/python/pyspark/testing/sqlutils.py
@@ -63,7 +63,7 @@ def search_jar(project_relative_path, sbt_jar_name_prefix, mvn_jar_name_prefix):
return jars[0]
-test_not_compiled_message = None
+test_not_compiled_message = ""
try:
from pyspark.sql.utils import require_test_compiled
@@ -71,7 +71,7 @@ try:
except Exception as e:
test_not_compiled_message = str(e)
-test_compiled = test_not_compiled_message is None
+test_compiled = not test_not_compiled_message
class SQLTestUtils:
diff --git a/python/pyspark/testing/streamingutils.py b/python/pyspark/testing/streamingutils.py
index 61f040051361..e4f324bf9e5a 100644
--- a/python/pyspark/testing/streamingutils.py
+++ b/python/pyspark/testing/streamingutils.py
@@ -51,9 +51,9 @@ else:
existing_args = os.environ.get("PYSPARK_SUBMIT_ARGS", "pyspark-shell")
jars_args = "--jars %s" % kinesis_asl_assembly_jar
os.environ["PYSPARK_SUBMIT_ARGS"] = " ".join([jars_args,
existing_args])
- kinesis_requirement_message = None # type: ignore
+ kinesis_requirement_message = ""
-should_test_kinesis = kinesis_requirement_message is None
+should_test_kinesis = not kinesis_requirement_message
class PySparkStreamingTestCase(unittest.TestCase):
diff --git a/python/pyspark/testing/utils.py b/python/pyspark/testing/utils.py
index 438143d66c66..6618c55772db 100644
--- a/python/pyspark/testing/utils.py
+++ b/python/pyspark/testing/utils.py
@@ -54,63 +54,63 @@ def have_package(name: str) -> bool:
have_numpy = have_package("numpy")
-numpy_requirement_message = None if have_numpy else "No module named 'numpy'"
+numpy_requirement_message = "" if have_numpy else "No module named 'numpy'"
have_scipy = have_package("scipy")
-scipy_requirement_message = None if have_scipy else "No module named 'scipy'"
+scipy_requirement_message = "" if have_scipy else "No module named 'scipy'"
have_sklearn = have_package("sklearn")
-sklearn_requirement_message = None if have_sklearn else "No module named 'sklearn'"
+sklearn_requirement_message = "" if have_sklearn else "No module named 'sklearn'"
have_torch = have_package("torch")
-torch_requirement_message = None if have_torch else "No module named 'torch'"
+torch_requirement_message = "" if have_torch else "No module named 'torch'"
have_torcheval = have_package("torcheval")
-torcheval_requirement_message = None if have_torcheval else "No module named 'torcheval'"
+torcheval_requirement_message = "" if have_torcheval else "No module named 'torcheval'"
have_deepspeed = have_package("deepspeed")
-deepspeed_requirement_message = None if have_deepspeed else "No module named 'deepspeed'"
+deepspeed_requirement_message = "" if have_deepspeed else "No module named 'deepspeed'"
have_plotly = have_package("plotly")
-plotly_requirement_message = None if have_plotly else "No module named 'plotly'"
+plotly_requirement_message = "" if have_plotly else "No module named 'plotly'"
have_matplotlib = have_package("matplotlib")
-matplotlib_requirement_message = None if have_matplotlib else "No module named 'matplotlib'"
+matplotlib_requirement_message = "" if have_matplotlib else "No module named 'matplotlib'"
have_tabulate = have_package("tabulate")
-tabulate_requirement_message = None if have_tabulate else "No module named 'tabulate'"
+tabulate_requirement_message = "" if have_tabulate else "No module named 'tabulate'"
have_graphviz = have_package("graphviz")
-graphviz_requirement_message = None if have_graphviz else "No module named 'graphviz'"
+graphviz_requirement_message = "" if have_graphviz else "No module named 'graphviz'"
have_flameprof = have_package("flameprof")
-flameprof_requirement_message = None if have_flameprof else "No module named 'flameprof'"
+flameprof_requirement_message = "" if have_flameprof else "No module named 'flameprof'"
have_jinja2 = have_package("jinja2")
-jinja2_requirement_message = None if have_jinja2 else "No module named 'jinja2'"
+jinja2_requirement_message = "" if have_jinja2 else "No module named 'jinja2'"
have_openpyxl = have_package("openpyxl")
-openpyxl_requirement_message = None if have_openpyxl else "No module named 'openpyxl'"
+openpyxl_requirement_message = "" if have_openpyxl else "No module named 'openpyxl'"
have_yaml = have_package("yaml")
-yaml_requirement_message = None if have_yaml else "No module named 'yaml'"
+yaml_requirement_message = "" if have_yaml else "No module named 'yaml'"
have_grpc = have_package("grpc")
-grpc_requirement_message = None if have_grpc else "No module named 'grpc'"
+grpc_requirement_message = "" if have_grpc else "No module named 'grpc'"
have_grpc_status = have_package("grpc_status")
-grpc_status_requirement_message = None if have_grpc_status else "No module named 'grpc_status'"
+grpc_status_requirement_message = "" if have_grpc_status else "No module named 'grpc_status'"
-googleapis_common_protos_requirement_message = None
+googleapis_common_protos_requirement_message = ""
try:
from google.rpc import error_details_pb2
except ImportError as e:
googleapis_common_protos_requirement_message = str(e)
-have_googleapis_common_protos = googleapis_common_protos_requirement_message is None
+have_googleapis_common_protos = not googleapis_common_protos_requirement_message
-pandas_requirement_message = None
+pandas_requirement_message = ""
try:
from pyspark.sql.pandas.utils import require_minimum_pandas_version
@@ -119,10 +119,10 @@ except Exception as e:
# If Pandas version requirement is not satisfied, skip related tests.
pandas_requirement_message = str(e)
-have_pandas = pandas_requirement_message is None
+have_pandas = not pandas_requirement_message
-pyarrow_requirement_message = None
+pyarrow_requirement_message = ""
try:
from pyspark.sql.pandas.utils import require_minimum_pyarrow_version
@@ -131,7 +131,7 @@ except Exception as e:
# If Arrow version requirement is not satisfied, skip related tests.
pyarrow_requirement_message = str(e)
-have_pyarrow = pyarrow_requirement_message is None
+have_pyarrow = not pyarrow_requirement_message
connect_requirement_message = (
@@ -142,14 +142,14 @@ connect_requirement_message = (
or grpc_status_requirement_message
)
-should_test_connect = connect_requirement_message is None
+should_test_connect = not connect_requirement_message
is_ansi_mode_test = True
if os.environ.get("SPARK_ANSI_SQL_MODE") == "false":
is_ansi_mode_test = False
-ansi_mode_not_supported_message = "ANSI mode is not supported" if is_ansi_mode_test else None
+ansi_mode_not_supported_message = "ANSI mode is not supported" if is_ansi_mode_test else ""
def read_int(b):
diff --git a/python/pyspark/tests/test_memory_profiler.py b/python/pyspark/tests/test_memory_profiler.py
index 047ea05a1874..5a77c751e6a0 100644
--- a/python/pyspark/tests/test_memory_profiler.py
+++ b/python/pyspark/tests/test_memory_profiler.py
@@ -23,7 +23,7 @@ import unittest
import warnings
from contextlib import contextmanager
from io import StringIO
-from typing import cast, Iterator
+from typing import Iterator
from unittest import mock
from pyspark import SparkConf
@@ -114,7 +114,7 @@ class MemoryProfilerTests(PySparkTestCase):
@unittest.skipIf(
not have_pandas or not have_pyarrow,
- cast(str, pandas_requirement_message or pyarrow_requirement_message),
+ pandas_requirement_message or pyarrow_requirement_message,
)
def test_profile_pandas_udf(self):
udfs = [self.exec_pandas_udf_ser_to_ser, self.exec_pandas_udf_ser_to_scalar]
@@ -136,7 +136,7 @@ class MemoryProfilerTests(PySparkTestCase):
@unittest.skipIf(
not have_pandas or not have_pyarrow,
- cast(str, pandas_requirement_message or pyarrow_requirement_message),
+ pandas_requirement_message or pyarrow_requirement_message,
)
def test_profile_pandas_function_api(self):
apis = [self.exec_grouped_map]
@@ -273,7 +273,7 @@ class MemoryProfiler2TestsMixin:
@unittest.skipIf(
not have_pandas or not have_pyarrow,
- cast(str, pandas_requirement_message or pyarrow_requirement_message),
+ pandas_requirement_message or pyarrow_requirement_message,
)
def test_memory_profiler_udf_with_arrow(self):
with self.sql_conf({"spark.sql.pyspark.udf.profiler": "memory"}):
@@ -315,7 +315,7 @@ class MemoryProfiler2TestsMixin:
@unittest.skipIf(
not have_pandas or not have_pyarrow,
- cast(str, pandas_requirement_message or pyarrow_requirement_message),
+ pandas_requirement_message or pyarrow_requirement_message,
)
def test_memory_profiler_pandas_udf(self):
@pandas_udf("long")
@@ -339,7 +339,7 @@ class MemoryProfiler2TestsMixin:
@unittest.skipIf(
not have_pandas or not have_pyarrow,
- cast(str, pandas_requirement_message or pyarrow_requirement_message),
+ pandas_requirement_message or pyarrow_requirement_message,
)
def test_memory_profiler_pandas_udf_iterator(self):
import pandas as pd
@@ -367,7 +367,7 @@ class MemoryProfiler2TestsMixin:
@unittest.skipIf(
not have_pandas or not have_pyarrow,
- cast(str, pandas_requirement_message or pyarrow_requirement_message),
+ pandas_requirement_message or pyarrow_requirement_message,
)
def test_memory_profiler_map_in_pandas(self):
df = self.spark.createDataFrame([(1, 21), (2, 30)], ("id", "age"))
@@ -386,7 +386,7 @@ class MemoryProfiler2TestsMixin:
@unittest.skipIf(
not have_pandas or not have_pyarrow,
- cast(str, pandas_requirement_message or pyarrow_requirement_message),
+ pandas_requirement_message or pyarrow_requirement_message,
)
def test_memory_profiler_pandas_udf_window(self):
# WindowInPandasExec
@@ -411,7 +411,7 @@ class MemoryProfiler2TestsMixin:
@unittest.skipIf(
not have_pandas or not have_pyarrow,
- cast(str, pandas_requirement_message or pyarrow_requirement_message),
+ pandas_requirement_message or pyarrow_requirement_message,
)
def test_memory_profiler_aggregate_in_pandas(self):
# AggregateInPandasExec
@@ -434,7 +434,7 @@ class MemoryProfiler2TestsMixin:
@unittest.skipIf(
not have_pandas or not have_pyarrow,
- cast(str, pandas_requirement_message or pyarrow_requirement_message),
+ pandas_requirement_message or pyarrow_requirement_message,
)
def test_memory_profiler_group_apply_in_pandas(self):
# FlatMapGroupsInBatchExec
@@ -456,7 +456,7 @@ class MemoryProfiler2TestsMixin:
@unittest.skipIf(
not have_pandas or not have_pyarrow,
- cast(str, pandas_requirement_message or pyarrow_requirement_message),
+ pandas_requirement_message or pyarrow_requirement_message,
)
def test_memory_profiler_cogroup_apply_in_pandas(self):
# FlatMapCoGroupsInBatchExec
@@ -485,7 +485,7 @@ class MemoryProfiler2TestsMixin:
@unittest.skipIf(
not have_pandas or not have_pyarrow,
- cast(str, pandas_requirement_message or pyarrow_requirement_message),
+ pandas_requirement_message or pyarrow_requirement_message,
)
def test_memory_profiler_group_apply_in_arrow(self):
# FlatMapGroupsInBatchExec
@@ -510,7 +510,7 @@ class MemoryProfiler2TestsMixin:
@unittest.skipIf(
not have_pandas or not have_pyarrow,
- cast(str, pandas_requirement_message or pyarrow_requirement_message),
+ pandas_requirement_message or pyarrow_requirement_message,
)
def test_memory_profiler_cogroup_apply_in_arrow(self):
import pyarrow as pa