Github user dilipbiswal commented on a diff in the pull request:
https://github.com/apache/spark/pull/22326#discussion_r220463070
--- Diff: sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/joins.scala ---
@@ -152,3 +153,56 @@ object EliminateOuterJoin extends Rule[LogicalPlan] with PredicateHelper {
       if (j.joinType == newJoinType) f else Filter(condition, j.copy(joinType = newJoinType))
   }
 }
+
+/**
+ * Correctly handle PythonUDFs that need access to both sides of the join by changing the
+ * join type to Cross.
+ */
+object HandlePythonUDFInJoinCondition extends Rule[LogicalPlan] with PredicateHelper {
+  def hasPythonUDF(expression: Expression): Boolean = {
+    expression.collectFirst { case udf: PythonUDF => udf }.isDefined
+  }
+
+  override def apply(plan: LogicalPlan): LogicalPlan = plan transformUp {
+    case j @ Join(_, _, joinType, condition)
+        if condition.map(splitConjunctivePredicates).getOrElse(Nil).exists(hasPythonUDF) =>
+      if (!joinType.isInstanceOf[InnerLike] && joinType != LeftSemi) {
+        // The current strategy only supports InnerLike and LeftSemi joins because, for other
+        // join types, running the join condition as a filter after the join breaks SQL
+        // semantics. If we let the plan pass through here, it would still fail later with an
+        // invalid PythonUDF RuntimeException (`requires attributes from more than one child`),
+        // so we throw here first to give a more readable error.
+        throw new AnalysisException("Using PythonUDF in join condition of join type" +
+          s" $joinType is not supported.")
+      }
+      if (SQLConf.get.crossJoinEnabled) {
--- End diff --
@mgaido91 It's probably because this suite only exercises one rule of the optimizer? :-)
```
object Optimize extends RuleExecutor[LogicalPlan] {
  val batches =
    Batch("Check Cartesian Products", Once,
      CheckCartesianProducts) :: Nil
}
```
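If we wanted this suite to cover the `crossJoinEnabled` branch in the new rule, the batch would also need to run that rule before the cartesian-product check. A minimal sketch, assuming `HandlePythonUDFInJoinCondition` from this diff is in scope for the test (the extra batch and its name are hypothetical, not part of the PR):
```
object Optimize extends RuleExecutor[LogicalPlan] {
  // Hypothetical two-batch setup: rewrite PythonUDF join conditions first,
  // then let CheckCartesianProducts inspect the resulting Cross join.
  val batches =
    Batch("Handle PythonUDF in join condition", Once,
      HandlePythonUDFInJoinCondition) ::
    Batch("Check Cartesian Products", Once,
      CheckCartesianProducts) :: Nil
}
```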
---