Github user icexelloss commented on a diff in the pull request:
https://github.com/apache/spark/pull/22104#discussion_r212460124
--- Diff: python/pyspark/sql/tests.py ---
@@ -3367,6 +3367,35 @@ def test_ignore_column_of_all_nulls(self):
finally:
shutil.rmtree(path)
+ # SPARK-24721
+ def test_datasource_with_udf_filter_lit_input(self):
+ import pandas as pd
+ import numpy as np
+ from pyspark.sql.functions import udf, pandas_udf, lit, col
+
+ path = tempfile.mkdtemp()
+ shutil.rmtree(path)
+ try:
+
+ self.spark.range(1).write.mode("overwrite").format('csv').save(path)
+ filesource_df = self.spark.read.csv(path)
--- End diff ---
Created separate tests for pandas_udf under ScalarPandasUDFTests
---
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]