BryanCutler commented on a change in pull request #24981: [SPARK-27463][PYTHON]
Support Dataframe Cogroup via Pandas UDFs
URL: https://github.com/apache/spark/pull/24981#discussion_r319261673
##########
File path:
sql/core/src/main/scala/org/apache/spark/sql/execution/python/FlatMapGroupsInPandasExec.scala
##########
@@ -75,88 +71,23 @@ case class FlatMapGroupsInPandasExec(
override protected def doExecute(): RDD[InternalRow] = {
val inputRDD = child.execute()
- val chainedFunc = Seq(ChainedPythonFunctions(Seq(pandasFunction)))
- val sessionLocalTimeZone = conf.sessionLocalTimeZone
- val pythonRunnerConf = ArrowUtils.getPythonRunnerConfMap(conf)
-
- // Deduplicate the grouping attributes.
- // If a grouping attribute also appears in data attributes, then we don't
need to send the
- // grouping attribute to Python worker. If a grouping attribute is not in
data attributes,
- // then we need to send this grouping attribute to python worker.
- //
- // We use argOffsets to distinguish grouping attributes and data
attributes as following:
- //
- // argOffsets[0] is the length of grouping attributes
- // argOffsets[1 .. argOffsets[0]+1] is the arg offsets for grouping
attributes
- // argOffsets[argOffsets[0]+1 .. ] is the arg offsets for data attributes
-
- val dataAttributes = child.output.drop(groupingAttributes.length)
- val groupingIndicesInData = groupingAttributes.map { attribute =>
- dataAttributes.indexWhere(attribute.semanticEquals)
- }
-
- val groupingArgOffsets = new ArrayBuffer[Int]
- val nonDupGroupingAttributes = new ArrayBuffer[Attribute]
- val nonDupGroupingSize = groupingIndicesInData.count(_ == -1)
-
- // Non duplicate grouping attributes are added to nonDupGroupingAttributes
and
- // their offsets are 0, 1, 2 ...
- // Duplicate grouping attributes are NOT added to nonDupGroupingAttributes
and
- // their offsets are n + index, where n is the total number of non
duplicate grouping
- // attributes and index is the index in the data attributes that the
grouping attribute
- // is a duplicate of.
-
- groupingAttributes.zip(groupingIndicesInData).foreach {
- case (attribute, index) =>
- if (index == -1) {
- groupingArgOffsets += nonDupGroupingAttributes.length
- nonDupGroupingAttributes += attribute
- } else {
- groupingArgOffsets += index + nonDupGroupingSize
- }
- }
-
- val dataArgOffsets = nonDupGroupingAttributes.length until
- (nonDupGroupingAttributes.length + dataAttributes.length)
-
- val argOffsets = Array(Array(groupingAttributes.length) ++
groupingArgOffsets ++ dataArgOffsets)
-
- // Attributes after deduplication
- val dedupAttributes = nonDupGroupingAttributes ++ dataAttributes
- val dedupSchema = StructType.fromAttributes(dedupAttributes)
+ val (dedupAttributes, argOffsets) = resolveArgOffsets(child,
groupingAttributes)
// Map grouped rows to ArrowPythonRunner results, Only execute if
partition is not empty
inputRDD.mapPartitionsInternal { iter => if (iter.isEmpty) iter else {
- val grouped = if (groupingAttributes.isEmpty) {
- Iterator(iter)
- } else {
- val groupedIter = GroupedIterator(iter, groupingAttributes,
child.output)
Review comment:
I think you need to remove the now-unused imports here as well (e.g. `ArrayBuffer` and `StructType`, whose only usages this change deletes).
----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
For queries about this service, please contact Infrastructure at:
[email protected]
With regards,
Apache Git Services
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]