mengxr commented on a change in pull request #24643:
[SPARK-26412][PySpark][SQL] Allow Pandas UDF to take an iterator of pd.Series
or an iterator of tuple of pd.Series
URL: https://github.com/apache/spark/pull/24643#discussion_r293602304
##########
File path: python/pyspark/sql/tests/test_pandas_udf_scalar.py
##########
@@ -136,84 +150,115 @@ def
test_register_nondeterministic_vectorized_udf_basic(self):
[row] = self.spark.sql("SELECT randomPandasUDF(1)").collect()
self.assertEqual(row[0], 7)
+ def random_iter_udf(it):
+ for i in it:
+ yield random.randint(6, 6) + i
+ random_pandas_iter_udf = pandas_udf(
+ random_iter_udf, IntegerType(),
PandasUDFType.SCALAR_ITER).asNondeterministic()
+ self.assertEqual(random_pandas_iter_udf.deterministic, False)
+ self.assertEqual(random_pandas_iter_udf.evalType,
PythonEvalType.SQL_SCALAR_PANDAS_ITER_UDF)
+ nondeterministic_pandas_iter_udf = self.spark.catalog.registerFunction(
+ "randomPandasIterUDF", random_pandas_iter_udf)
+ self.assertEqual(nondeterministic_pandas_iter_udf.deterministic, False)
+ self.assertEqual(nondeterministic_pandas_iter_udf.evalType,
+ PythonEvalType.SQL_SCALAR_PANDAS_ITER_UDF)
+ [row] = self.spark.sql("SELECT randomPandasIterUDF(1)").collect()
+ self.assertEqual(row[0], 7)
+
def test_vectorized_udf_null_boolean(self):
data = [(True,), (True,), (None,), (False,)]
schema = StructType().add("bool", BooleanType())
df = self.spark.createDataFrame(data, schema)
- bool_f = pandas_udf(lambda x: x, BooleanType())
- res = df.select(bool_f(col('bool')))
- self.assertEquals(df.collect(), res.collect())
+ for udf_type in [PandasUDFType.SCALAR, PandasUDFType.SCALAR_ITER]:
+ bool_f = pandas_udf(lambda x: x, BooleanType(), udf_type)
+ res = df.select(bool_f(col('bool')))
+ self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_null_byte(self):
data = [(None,), (2,), (3,), (4,)]
schema = StructType().add("byte", ByteType())
df = self.spark.createDataFrame(data, schema)
- byte_f = pandas_udf(lambda x: x, ByteType())
- res = df.select(byte_f(col('byte')))
- self.assertEquals(df.collect(), res.collect())
+ for udf_type in [PandasUDFType.SCALAR, PandasUDFType.SCALAR_ITER]:
+ byte_f = pandas_udf(lambda x: x, ByteType(), udf_type)
+ res = df.select(byte_f(col('byte')))
+ self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_null_short(self):
data = [(None,), (2,), (3,), (4,)]
schema = StructType().add("short", ShortType())
df = self.spark.createDataFrame(data, schema)
- short_f = pandas_udf(lambda x: x, ShortType())
- res = df.select(short_f(col('short')))
- self.assertEquals(df.collect(), res.collect())
+ for udf_type in [PandasUDFType.SCALAR, PandasUDFType.SCALAR_ITER]:
+ short_f = pandas_udf(lambda x: x, ShortType(), udf_type)
+ res = df.select(short_f(col('short')))
+ self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_null_int(self):
data = [(None,), (2,), (3,), (4,)]
schema = StructType().add("int", IntegerType())
df = self.spark.createDataFrame(data, schema)
- int_f = pandas_udf(lambda x: x, IntegerType())
- res = df.select(int_f(col('int')))
- self.assertEquals(df.collect(), res.collect())
+ for udf_type in [PandasUDFType.SCALAR, PandasUDFType.SCALAR_ITER]:
+ int_f = pandas_udf(lambda x: x, IntegerType(), udf_type)
+ res = df.select(int_f(col('int')))
+ self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_null_long(self):
data = [(None,), (2,), (3,), (4,)]
schema = StructType().add("long", LongType())
df = self.spark.createDataFrame(data, schema)
- long_f = pandas_udf(lambda x: x, LongType())
- res = df.select(long_f(col('long')))
- self.assertEquals(df.collect(), res.collect())
+ for udf_type in [PandasUDFType.SCALAR, PandasUDFType.SCALAR_ITER]:
+ long_f = pandas_udf(lambda x: x, LongType(), udf_type)
+ res = df.select(long_f(col('long')))
+ self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_null_float(self):
data = [(3.0,), (5.0,), (-1.0,), (None,)]
schema = StructType().add("float", FloatType())
df = self.spark.createDataFrame(data, schema)
- float_f = pandas_udf(lambda x: x, FloatType())
- res = df.select(float_f(col('float')))
- self.assertEquals(df.collect(), res.collect())
+ for udf_type in [PandasUDFType.SCALAR, PandasUDFType.SCALAR_ITER]:
+ float_f = pandas_udf(lambda x: x, FloatType(), udf_type)
+ res = df.select(float_f(col('float')))
+ self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_null_double(self):
data = [(3.0,), (5.0,), (-1.0,), (None,)]
schema = StructType().add("double", DoubleType())
df = self.spark.createDataFrame(data, schema)
- double_f = pandas_udf(lambda x: x, DoubleType())
- res = df.select(double_f(col('double')))
- self.assertEquals(df.collect(), res.collect())
+ for udf_type in [PandasUDFType.SCALAR, PandasUDFType.SCALAR_ITER]:
+ double_f = pandas_udf(lambda x: x, DoubleType(), udf_type)
+ res = df.select(double_f(col('double')))
+ self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_null_decimal(self):
data = [(Decimal(3.0),), (Decimal(5.0),), (Decimal(-1.0),), (None,)]
schema = StructType().add("decimal", DecimalType(38, 18))
df = self.spark.createDataFrame(data, schema)
- decimal_f = pandas_udf(lambda x: x, DecimalType(38, 18))
- res = df.select(decimal_f(col('decimal')))
- self.assertEquals(df.collect(), res.collect())
+ for udf_type in [PandasUDFType.SCALAR, PandasUDFType.SCALAR_ITER]:
+ decimal_f = pandas_udf(lambda x: x, DecimalType(38, 18), udf_type)
+ res = df.select(decimal_f(col('decimal')))
+ self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_null_string(self):
data = [("foo",), (None,), ("bar",), ("bar",)]
schema = StructType().add("str", StringType())
df = self.spark.createDataFrame(data, schema)
- str_f = pandas_udf(lambda x: x, StringType())
- res = df.select(str_f(col('str')))
- self.assertEquals(df.collect(), res.collect())
+ for udf_type in [PandasUDFType.SCALAR, PandasUDFType.SCALAR_ITER]:
+ str_f = pandas_udf(lambda x: x, StringType(), udf_type)
+ res = df.select(str_f(col('str')))
+ self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_string_in_udf(self):
df = self.spark.range(10)
- str_f = pandas_udf(lambda x: pd.Series(map(str, x)), StringType())
- actual = df.select(str_f(col('id')))
- expected = df.select(col('id').cast('string'))
- self.assertEquals(expected.collect(), actual.collect())
+ scalar_f = lambda x: pd.Series(map(str, x))
+
+ def iter_f(it):
+ for i in it:
+ yield pd.Series(map(str, i))
Review comment:
minor: `yield scalar_f(i)`
----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
For queries about this service, please contact Infrastructure at:
[email protected]
With regards,
Apache Git Services
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]