dtenedor commented on code in PR #42420:
URL: https://github.com/apache/spark/pull/42420#discussion_r1297784284
##########
python/pyspark/worker.py:
##########
@@ -573,9 +574,73 @@ def read_udtf(pickleSer, infile, eval_type):
f"The return type of a UDTF must be a struct type, but got
{type(return_type)}."
)
+ class UDTFWithPartitions:
+ """
+ This implements the logic of a UDTF that accepts an input TABLE
argument with one or more
+ PARTITION BY expressions.
+
+ Parameters
+ ----------
+ create_udtf: function
+ Function to create a new instance of the UDTF to be invoked.
+ partition_child_indexes: list
+ List of integers identifying zero-based indexes of the columns of
the input table that
+ contain projected partitioning expressions. This class will
inspect these values for
+ each pair of consecutive input rows. When they change, this
indicates the boundary
+ between two partitions, and we will invoke the 'terminate' method
on the UDTF class
+ instance and then destroy it and create a new one to implement the
desired partitioning
+ semantics.
Review Comment:
@ueshin told me to put it under the class :) I moved the Parameters part
under `__init__` and left a general description under the class.
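Concretely, the arrangement now looks something like this (a sketch based on
the diff above; the final wording in the PR may differ):

class UDTFWithPartitions:
    """
    This implements the logic of a UDTF that accepts an input TABLE argument
    with one or more PARTITION BY expressions.
    """

    def __init__(self, create_udtf, partition_child_indexes):
        """
        Parameters
        ----------
        create_udtf: function
            Function to create a new instance of the UDTF to be invoked.
        partition_child_indexes: list
            Zero-based indexes of the input table columns that contain
            projected partitioning expressions.
        """
        self._create_udtf = create_udtf
        self._udtf = create_udtf()
        self._prev_arguments = None
        self._partition_child_indexes = partition_child_indexes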
##########
sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala:
##########
@@ -2118,11 +2121,32 @@ class Analyzer(override val catalogManager: CatalogManager) extends RuleExecutor
             tableArgs.size)
         }
         val alias = SubqueryAlias.generateSubqueryName(s"_${tableArgs.size}")
+        // Propagate the column indexes for TABLE arguments to the PythonUDTF instance.
+        val tvfWithTableColumnIndexes: LogicalPlan = tvf match {
+          case g @ Generate(p: PythonUDTF, _, _, _, _, _) =>
+            functionTableSubqueryArgs.headOption.map { tableArg =>
+              val indexes = PythonUDTFPartitionColumnIndexes(
+                tableArg.partitioningExpressionIndexes)
+              g.copy(generator = p.copy(pythonUDTFPartitionColumnIndexes = Some(indexes)))
+            }.getOrElse {
+              g
+            }
+          case g @ Generate(p: UnresolvedPolymorphicPythonUDTF, _, _, _, _, _) =>
+            functionTableSubqueryArgs.headOption.map { tableArg =>
+              val indexes = PythonUDTFPartitionColumnIndexes(
+                tableArg.partitioningExpressionIndexes)
+              g.copy(generator = p.copy(pythonUDTFPartitionColumnIndexes = Some(indexes)))
+            }.getOrElse {
+              g
Review Comment:
That doesn't compile because the type of the object that `p` refers to cannot
be determined. I was able to deduplicate this logic into a helper function,
though, to avoid repeating it.
##########
python/pyspark/worker.py:
##########
@@ -573,9 +574,73 @@ def read_udtf(pickleSer, infile, eval_type):
f"The return type of a UDTF must be a struct type, but got
{type(return_type)}."
)
+ class UDTFWithPartitions:
+ """
+ This implements the logic of a UDTF that accepts an input TABLE
argument with one or more
+ PARTITION BY expressions.
Review Comment:
Good idea, done.
##########
python/pyspark/worker.py:
##########
@@ -573,9 +574,73 @@ def read_udtf(pickleSer, infile, eval_type):
f"The return type of a UDTF must be a struct type, but got
{type(return_type)}."
)
+ class UDTFWithPartitions:
+ """
+ This implements the logic of a UDTF that accepts an input TABLE
argument with one or more
+ PARTITION BY expressions.
+
+ Parameters
+ ----------
+ create_udtf: function
+ Function to create a new instance of the UDTF to be invoked.
+ partition_child_indexes: list
+ List of integers identifying zero-based indexes of the columns of
the input table that
+ contain projected partitioning expressions. This class will
inspect these values for
+ each pair of consecutive input rows. When they change, this
indicates the boundary
+ between two partitions, and we will invoke the 'terminate' method
on the UDTF class
+ instance and then destroy it and create a new one to implement the
desired partitioning
+ semantics.
+ """
+ def __init__(self, create_udtf, partition_child_indexes):
+ self._create_udtf = create_udtf
+ self._udtf = create_udtf()
+ self._prev_arguments = None
+ self._partition_child_indexes = partition_child_indexes
+
+ def eval(self, *args, **kwargs):
+ changed_partitions = self._check_partition_boundaries(
+ list(args) + list(kwargs.values()))
+ if changed_partitions:
+ if self._udtf.terminate is not None:
+ result = self._udtf.terminate()
+ if result is not None:
+ for row in result:
+ yield row
Review Comment:
Do these joins refer to the internal lateral joins we generate as part of
planning the UDTF call with a TABLE argument? That shouldn't matter, since
that planning step projects out only the UDTF output columns (dropping the
input table columns).
I added a test case where we do an explicit lateral join to a UDTF that
takes a TABLE argument with PARTITION BY. This concern should not apply for
that case either, because the row on the left side of the lateral join gets
prepended to all the output rows of the UDTF (whether from "eval" or
"terminate"). But it is a good extra test case to throw in there.
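For reference, a query of roughly the shape that test exercises might look
like this (a hypothetical sketch assuming a SparkSession named `spark`;
`my_udtf`, `t`, and `v` are made-up names, and the exact syntax in the added
test may differ):

# Explicit lateral join against a UDTF that takes a TABLE argument with
# PARTITION BY; each left-side row of `t` is prepended to every row the
# UDTF emits, whether from "eval" or "terminate".
spark.sql("""
    SELECT t.id, f.*
    FROM t, LATERAL my_udtf(TABLE(v) PARTITION BY v.part_col) f
""").show()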
##########
sql/core/src/main/scala/org/apache/spark/sql/execution/python/BatchEvalPythonUDTFExec.scala:
##########
@@ -134,6 +134,13 @@ object PythonUDTFRunner {
        dataOut.writeBoolean(false)
      }
    }
+    udtf.pythonUDTFPartitionColumnIndexes match {
+      case Some(partitionColumnIndexes) =>
+        dataOut.writeInt(partitionColumnIndexes.partitionChildIndexes.length)
+        partitionColumnIndexes.partitionChildIndexes.foreach(dataOut.writeInt)
Review Comment:
Good suggestion, done!
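For context, the matching read on the Python worker side of this protocol
would look something like the following sketch, where `infile` is the input
stream passed to `read_udtf` and `read_int` comes from `pyspark.serializers`
(variable names here are illustrative):

from pyspark.serializers import read_int

# Mirror of the Scala write above: first the count of partition child
# indexes, then each zero-based column index in turn.
num_partition_child_indexes = read_int(infile)
partition_child_indexes = [
    read_int(infile) for _ in range(num_partition_child_indexes)
]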
##########
python/pyspark/worker.py:
##########
@@ -573,9 +574,73 @@ def read_udtf(pickleSer, infile, eval_type):
f"The return type of a UDTF must be a struct type, but got
{type(return_type)}."
)
+ class UDTFWithPartitions:
+ """
+ This implements the logic of a UDTF that accepts an input TABLE
argument with one or more
+ PARTITION BY expressions.
+
+ Parameters
+ ----------
+ create_udtf: function
+ Function to create a new instance of the UDTF to be invoked.
+ partition_child_indexes: list
+ List of integers identifying zero-based indexes of the columns of
the input table that
+ contain projected partitioning expressions. This class will
inspect these values for
+ each pair of consecutive input rows. When they change, this
indicates the boundary
+ between two partitions, and we will invoke the 'terminate' method
on the UDTF class
+ instance and then destroy it and create a new one to implement the
desired partitioning
+ semantics.
+ """
+ def __init__(self, create_udtf, partition_child_indexes):
+ self._create_udtf = create_udtf
+ self._udtf = create_udtf()
+ self._prev_arguments = None
+ self._partition_child_indexes = partition_child_indexes
+
+ def eval(self, *args, **kwargs):
+ changed_partitions = self._check_partition_boundaries(
+ list(args) + list(kwargs.values()))
+ if changed_partitions:
+ if self._udtf.terminate is not None:
+ result = self._udtf.terminate()
+ if result is not None:
+ for row in result:
+ yield row
+ self._udtf = self._create_udtf()
+ if self._udtf.eval is not None:
+ result = self._udtf.eval(*args, **kwargs)
+ if result is not None:
+ for row in result:
+ yield row
+
+ def terminate(self):
+ if self._udtf.terminate is not None:
+ return self._udtf.terminate()
+
+ def _check_partition_boundaries(self, arguments):
+ result = False
Review Comment:
Done.
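The excerpt cuts off at `result = False`; presumably the rest of the method
compares the partitioning values of the current row against the previous one,
along these lines (a sketch, not the exact code from the PR):

def _check_partition_boundaries(self, arguments):
    # Return True when the projected partitioning values differ from the
    # previous row's values, i.e. we have crossed a partition boundary.
    result = False
    if self._prev_arguments is not None:
        cur = [arguments[i] for i in self._partition_child_indexes]
        prev = [self._prev_arguments[i] for i in self._partition_child_indexes]
        result = cur != prev
    self._prev_arguments = arguments
    return result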
##########
python/pyspark/worker.py:
##########
@@ -573,9 +574,73 @@ def read_udtf(pickleSer, infile, eval_type):
f"The return type of a UDTF must be a struct type, but got
{type(return_type)}."
)
+ class UDTFWithPartitions:
+ """
+ This implements the logic of a UDTF that accepts an input TABLE
argument with one or more
+ PARTITION BY expressions.
+
+ Parameters
+ ----------
+ create_udtf: function
+ Function to create a new instance of the UDTF to be invoked.
+ partition_child_indexes: list
+ List of integers identifying zero-based indexes of the columns of
the input table that
+ contain projected partitioning expressions. This class will
inspect these values for
+ each pair of consecutive input rows. When they change, this
indicates the boundary
+ between two partitions, and we will invoke the 'terminate' method
on the UDTF class
+ instance and then destroy it and create a new one to implement the
desired partitioning
+ semantics.
+ """
+ def __init__(self, create_udtf, partition_child_indexes):
Review Comment:
Sure, done.
##########
python/pyspark/worker.py:
##########
@@ -573,9 +574,73 @@ def read_udtf(pickleSer, infile, eval_type):
f"The return type of a UDTF must be a struct type, but got
{type(return_type)}."
)
+ class UDTFWithPartitions:
+ """
+ This implements the logic of a UDTF that accepts an input TABLE
argument with one or more
+ PARTITION BY expressions.
+
+ Parameters
+ ----------
+ create_udtf: function
+ Function to create a new instance of the UDTF to be invoked.
+ partition_child_indexes: list
+ List of integers identifying zero-based indexes of the columns of
the input table that
+ contain projected partitioning expressions. This class will
inspect these values for
+ each pair of consecutive input rows. When they change, this
indicates the boundary
+ between two partitions, and we will invoke the 'terminate' method
on the UDTF class
+ instance and then destroy it and create a new one to implement the
desired partitioning
+ semantics.
+ """
+ def __init__(self, create_udtf, partition_child_indexes):
+ self._create_udtf = create_udtf
+ self._udtf = create_udtf()
+ self._prev_arguments = None
+ self._partition_child_indexes = partition_child_indexes
+
+ def eval(self, *args, **kwargs):
+ changed_partitions = self._check_partition_boundaries(
+ list(args) + list(kwargs.values()))
+ if changed_partitions:
+ if self._udtf.terminate is not None:
+ result = self._udtf.terminate()
+ if result is not None:
+ for row in result:
+ yield row
+ self._udtf = self._create_udtf()
Review Comment:
Sounds good. Let's maybe decouple that from the primary implementation in
this PR.
This number of times would be equivalent to the number of unique
combinations of values of the PARTITION BY expression list.
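As a quick illustration of that invariant (a hypothetical example, not code
from the PR), assuming the input rows arrive sorted on the partitioning
column:

# Rows arrive sorted by their single partitioning column (index 0 here).
# Three distinct values ('a', 'b', 'c') mean the wrapped UDTF gets
# instantiated three times in total.
rows = [("a", 1), ("a", 2), ("b", 3), ("c", 4)]

prev_key, instances = None, 0
for row in rows:
    key = row[0]
    if key != prev_key:
        instances += 1  # terminate the old instance and create a fresh one
        prev_key = key
print(instances)  # 3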
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]