HyukjinKwon commented on PR #46417:
URL: https://github.com/apache/spark/pull/46417#issuecomment-2099995116
```
======================================================================
ERROR [2.562s]: test_apply_infer_schema_without_shortcut (pyspark.pandas.tests.connect.groupby.test_parity_apply_func.GroupbyParityApplyFuncTests)
----------------------------------------------------------------------
Traceback (most recent call last):
  File "/home/runner/work/spark/spark-3.5/python/pyspark/pandas/tests/groupby/test_apply_func.py", line 240, in test_apply_infer_schema_without_shortcut
    self.assert_eq(
  File "/home/runner/work/spark/spark-3.5/python/pyspark/testing/pandasutils.py", line 525, in assert_eq
    return assertPandasOnSparkEqual(
  File "/home/runner/work/spark/spark-3.5/python/pyspark/testing/pandasutils.py", line 457, in assertPandasOnSparkEqual
    actual = actual.to_pandas()
  File "/home/runner/work/spark/spark-3.5/python/pyspark/pandas/frame.py", line 5428, in to_pandas
    return self._to_pandas()
  File "/home/runner/work/spark/spark-3.5/python/pyspark/pandas/frame.py", line 5434, in _to_pandas
    return self._internal.to_pandas_frame.copy()
  File "/home/runner/work/spark/spark-3.5/python/pyspark/pandas/utils.py", line 600, in wrapped_lazy_property
    setattr(self, attr_name, fn(self))
  File "/home/runner/work/spark/spark-3.5/python/pyspark/pandas/internal.py", line 1115, in to_pandas_frame
    pdf = sdf.toPandas()
  File "/home/runner/work/spark/spark-3.5/python/pyspark/sql/connect/dataframe.py", line 1663, in toPandas
    return self._session.client.to_pandas(query)
  File "/home/runner/work/spark/spark-3.5/python/pyspark/sql/connect/client/core.py", line 873, in to_pandas
    table, schema, metrics, observed_metrics, _ = self._execute_and_fetch(
  File "/home/runner/work/spark/spark-3.5/python/pyspark/sql/connect/client/core.py", line 1283, in _execute_and_fetch
    for response in self._execute_and_fetch_as_iterator(req):
  File "/home/runner/work/spark/spark-3.5/python/pyspark/sql/connect/client/core.py", line 1264, in _execute_and_fetch_as_iterator
    self._handle_error(error)
  File "/home/runner/work/spark/spark-3.5/python/pyspark/sql/connect/client/core.py", line 1503, in _handle_error
    self._handle_rpc_error(error)
  File "/home/runner/work/spark/spark-3.5/python/pyspark/sql/connect/client/core.py", line 1539, in _handle_rpc_error
    raise convert_exception(info, status.message) from None
pyspark.errors.exceptions.connect.PythonException:
  An exception was thrown from the Python worker. Please see the stack trace below.
Traceback (most recent call last):
  File "/home/runner/work/spark/spark/python/lib/pyspark.zip/pyspark/worker.py", line 1834, in main
    process()
  File "/home/runner/work/spark/spark/python/lib/pyspark.zip/pyspark/worker.py", line 1826, in process
    serializer.dump_stream(out_iter, outfile)
  File "/home/runner/work/spark/spark/python/lib/pyspark.zip/pyspark/sql/pandas/serializers.py", line 531, in dump_stream
    return ArrowStreamSerializer.dump_stream(self, init_stream_yield_batches(), stream)
  File "/home/runner/work/spark/spark/python/lib/pyspark.zip/pyspark/sql/pandas/serializers.py", line 104, in dump_stream
    for batch in iterator:
  File "/home/runner/work/spark/spark/python/lib/pyspark.zip/pyspark/sql/pandas/serializers.py", line 524, in init_stream_yield_batches
    for series in iterator:
  File "/home/runner/work/spark/spark/python/lib/pyspark.zip/pyspark/worker.py", line 1610, in mapper
    return f(keys, vals)
  File "/home/runner/work/spark/spark/python/lib/pyspark.zip/pyspark/worker.py", line 488, in <lambda>
    return lambda k, v: [(wrapped(k, v), to_arrow_type(return_type))]
  File "/home/runner/work/spark/spark/python/lib/pyspark.zip/pyspark/worker.py", line 478, in wrapped
    result = f(pd.concat(value_series, axis=1))
  File "/home/runner/work/spark/spark/python/lib/pyspark.zip/pyspark/util.py", line 134, in wrapper
    return f(*args, **kwargs)
  File "/home/runner/work/spark/spark-3.5/python/pyspark/pandas/groupby.py", line 2307, in rename_output
    pdf.columns = return_schema.names
  File "/usr/share/miniconda/envs/server-env/lib/python3.10/site-packages/pandas/core/generic.py", line 6313, in __setattr__
    return object.__setattr__(self, name, value)
  File "properties.pyx", line 69, in pandas._libs.properties.AxisProperty.__set__
  File "/usr/share/miniconda/envs/server-env/lib/python3.10/site-packages/pandas/core/generic.py", line 814, in _set_axis
    self._mgr.set_axis(axis, labels)
  File "/usr/share/miniconda/envs/server-env/lib/python3.10/site-packages/pandas/core/...
----------------------------------------------------------------------
```
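The trace is cut off before the final error message, but the failing frame is `pdf.columns = return_schema.names` in `rename_output` (groupby.py, line 2307), and the pandas `_mgr.set_axis` path it descends into only raises when the new label count does not match the number of columns on the axis. A minimal sketch of that failure mode, outside Spark entirely (the DataFrame and names below are illustrative, not taken from the test):

```python
import pandas as pd

# Stand-in for the pdf produced by the applied function: 3 columns.
pdf = pd.DataFrame({"a": [1, 2], "b": [3, 4], "c": [5, 6]})

try:
    # Mimics `pdf.columns = return_schema.names` when the inferred return
    # schema has a different number of fields than the function's output.
    pdf.columns = ["x", "y"]
except ValueError as e:
    # pandas raises: "Length mismatch: Expected axis has 3 elements,
    # new values have 2 elements"
    print(e)
```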
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]