leanken commented on a change in pull request #29304:
URL: https://github.com/apache/spark/pull/29304#discussion_r464019324



##########
File path: sql/core/src/main/scala/org/apache/spark/sql/execution/joins/HashedRelation.scala
##########
@@ -327,11 +327,29 @@ private[joins] object UnsafeHashedRelation {
     // Create a mapping of buildKeys -> rows
     val keyGenerator = UnsafeProjection.create(key)
     var numFields = 0
+    val nullPaddingCombinations: Seq[UnsafeProjection] = if (isNullAware) {
+      // C(numKeys, 0), C(numKeys, 1) ... C(numKeys, numKeys - 1)

Review comment:
       updated
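
For context on the C(numKeys, 0) ... C(numKeys, numKeys - 1) comment above, here is a minimal sketch in plain Scala (not the PR's UnsafeProjection-based code) of how the null-padding masks could be enumerated; every subset of key positions except the all-null one is covered, for (2^numKeys - 1) masks in total:

    // Sketch only: bit i set in a mask means key position i is padded with null.
    // The all-null mask is skipped, leaving 2^numKeys - 1 combinations, i.e.
    // C(numKeys, 0) + C(numKeys, 1) + ... + C(numKeys, numKeys - 1).
    def nullPaddingMasks(numKeys: Int): Seq[Seq[Boolean]] =
      (0 until (1 << numKeys))
        .filter(_ != (1 << numKeys) - 1)
        .map(mask => (0 until numKeys).map(i => ((mask >> i) & 1) == 1))

    // numKeys = 2 yields the combinations (a, b), (null, b) and (a, null).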

##########
File path: sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/planning/patterns.scala
##########
@@ -393,33 +395,42 @@ object PhysicalWindow {
 
 object ExtractSingleColumnNullAwareAntiJoin extends JoinSelectionHelper with PredicateHelper {
 
-  // TODO support multi column NULL-aware anti join in future.
-  // See. http://www.vldb.org/pvldb/vol2/vldb09-423.pdf Section 6
-  // multi-column null aware anti join is much more complicated than single column ones.
-
   // streamedSideKeys, buildSideKeys
   private type ReturnType = (Seq[Expression], Seq[Expression])
 
-  /**
-   * See. [SPARK-32290]
-   * LeftAnti(condition: Or(EqualTo(a=b), IsNull(EqualTo(a=b)))
-   * will almost certainly be planned as a Broadcast Nested Loop join,
-   * which is very time consuming because it's an O(M*N) calculation.
-   * But if it's a single column case O(M*N) calculation could be optimized into O(M)
-   * using hash lookup instead of loop lookup.
-   */
   def unapply(join: Join): Option[ReturnType] = join match {
-    case Join(left, right, LeftAnti,
-      Some(Or(e @ EqualTo(leftAttr: AttributeReference, rightAttr: AttributeReference),
-        IsNull(e2 @ EqualTo(_, _)))), _)
-        if SQLConf.get.optimizeNullAwareAntiJoin &&
-          e.semanticEquals(e2) =>
-      if (canEvaluate(leftAttr, left) && canEvaluate(rightAttr, right)) {
-        Some(Seq(leftAttr), Seq(rightAttr))
-      } else if (canEvaluate(leftAttr, right) && canEvaluate(rightAttr, left)) {
-        Some(Seq(rightAttr), Seq(leftAttr))
-      } else {
+    case Join(left, right, LeftAnti, condition, _) if SQLConf.get.optimizeNullAwareAntiJoin =>
+      val predicates = condition.map(splitConjunctivePredicates).getOrElse(Nil)
+      if (predicates.isEmpty ||
+        predicates.length > SQLConf.get.optimizeNullAwareAntiJoinMaxNumKeys) {
         None
+      } else {
+        val leftKeys = ArrayBuffer[Expression]()
+        val rightKeys = ArrayBuffer[Expression]()
+
+        // all predicates must match the pattern: Or(EqualTo(a=b), IsNull(EqualTo(a=b)))
+        val allMatch = predicates.forall {

Review comment:
       updated
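
As a reading aid for the rewritten unapply above, a minimal sketch on a toy expression AST (not Catalyst; case-class equality stands in for semanticEquals) of the per-conjunct shape check:

    sealed trait Expr
    case class EqualTo(left: String, right: String) extends Expr
    case class IsNull(child: Expr) extends Expr
    case class Or(left: Expr, right: Expr) extends Expr

    // Every conjunct must match Or(EqualTo(a, b), IsNull(EqualTo(a, b)))
    // with both EqualTo nodes equal; otherwise no NAAJ keys are extracted.
    def extractNaajKeys(predicates: Seq[Expr]): Option[(Seq[String], Seq[String])] = {
      val keyPairs = predicates.map {
        case Or(e @ EqualTo(l, r), IsNull(e2: EqualTo)) if e == e2 => Some((l, r))
        case _ => None
      }
      if (keyPairs.nonEmpty && keyPairs.forall(_.isDefined)) {
        val pairs = keyPairs.flatten
        Some((pairs.map(_._1), pairs.map(_._2)))
      } else None
    }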

##########
File path: sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/planning/patterns.scala
##########
@@ -393,33 +395,42 @@ object PhysicalWindow {
 
 object ExtractSingleColumnNullAwareAntiJoin extends JoinSelectionHelper with PredicateHelper {
 
-  // TODO support multi column NULL-aware anti join in future.
-  // See. http://www.vldb.org/pvldb/vol2/vldb09-423.pdf Section 6
-  // multi-column null aware anti join is much more complicated than single column ones.
-
   // streamedSideKeys, buildSideKeys
   private type ReturnType = (Seq[Expression], Seq[Expression])
 
-  /**
-   * See. [SPARK-32290]
-   * LeftAnti(condition: Or(EqualTo(a=b), IsNull(EqualTo(a=b)))
-   * will almost certainly be planned as a Broadcast Nested Loop join,
-   * which is very time consuming because it's an O(M*N) calculation.
-   * But if it's a single column case O(M*N) calculation could be optimized into O(M)
-   * using hash lookup instead of loop lookup.
-   */
   def unapply(join: Join): Option[ReturnType] = join match {
-    case Join(left, right, LeftAnti,
-      Some(Or(e @ EqualTo(leftAttr: AttributeReference, rightAttr: AttributeReference),
-        IsNull(e2 @ EqualTo(_, _)))), _)
-        if SQLConf.get.optimizeNullAwareAntiJoin &&
-          e.semanticEquals(e2) =>
-      if (canEvaluate(leftAttr, left) && canEvaluate(rightAttr, right)) {
-        Some(Seq(leftAttr), Seq(rightAttr))
-      } else if (canEvaluate(leftAttr, right) && canEvaluate(rightAttr, left)) {
-        Some(Seq(rightAttr), Seq(leftAttr))
-      } else {
+    case Join(left, right, LeftAnti, condition, _) if SQLConf.get.optimizeNullAwareAntiJoin =>
+      val predicates = condition.map(splitConjunctivePredicates).getOrElse(Nil)
+      if (predicates.isEmpty ||
+        predicates.length > SQLConf.get.optimizeNullAwareAntiJoinMaxNumKeys) {
         None
+      } else {
+        val leftKeys = ArrayBuffer[Expression]()
+        val rightKeys = ArrayBuffer[Expression]()
+
+        // all predicates must match the pattern: Or(EqualTo(a=b), IsNull(EqualTo(a=b)))

Review comment:
       done

##########
File path: sql/core/src/main/scala/org/apache/spark/sql/execution/joins/BroadcastHashJoinExec.scala
##########
@@ -245,7 +244,7 @@ case class BroadcastHashJoinExec(
            |boolean $found = false;
            |// generate join key for stream side
            |${keyEv.code}
-           |if ($anyNull) {
+           |if (${ if (isLongHashedRelation) s"$anyNull" else s"${keyEv.value}.allNull()"}) {

Review comment:
       done
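
On the allNull() branch above: for a LongHashedRelation the single key's $anyNull flag is enough, while a multi-column key must be checked for all fields being null. A minimal sketch, assuming a row abstraction shaped like Spark's InternalRow (numFields / isNullAt):

    // Sketch only: the multi-column analogue of the single-key $anyNull check.
    trait KeyRow { def numFields: Int; def isNullAt(i: Int): Boolean }

    def allNull(row: KeyRow): Boolean =
      (0 until row.numFields).forall(row.isNullAt)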

##########
File path: sql/catalyst/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala
##########
@@ -2678,14 +2678,26 @@ object SQLConf {
       .checkValue(_ >= 0, "The value must be non-negative.")
       .createWithDefault(8)
 
+  val OPTIMIZE_NULL_AWARE_ANTI_JOIN_MAX_NUM_KEYS =
+    buildConf("spark.sql.optimizeNullAwareAntiJoin.maxNumKeys")
+      .internal()
+      .doc("The maximum number of keys that will be supported to use NAAJ 
optimize. " +
+        "While with NAAJ optimize, buildSide data would be expanded to 
(2^numKeys - 1) times, " +
+        "it might cause Driver OOM if NAAJ numKeys increased, since it is 
exponential growth. " +
+        "It's ok to increase this configuration if buildSide is small enough 
and safe enough " +
+        "to do such exponential expansion to gain performance improvement from 
O(M*N) to O(M).")

Review comment:
       done, removed it.
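
For the (2^numKeys - 1) claim in the doc string above, the build-side expansion factors work out as follows (illustration only, not from the PR):

    // Expansion factor of the build side under NAAJ null padding.
    (1 to 8).foreach { k =>
      println(s"numKeys = $k -> build side expanded x${(1 << k) - 1}")
    }
    // numKeys = 1 -> x1, numKeys = 2 -> x3, numKeys = 3 -> x7, ..., numKeys = 8 -> x255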

##########
File path: sql/catalyst/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala
##########
@@ -2678,14 +2678,26 @@ object SQLConf {
       .checkValue(_ >= 0, "The value must be non-negative.")
       .createWithDefault(8)
 
+  val OPTIMIZE_NULL_AWARE_ANTI_JOIN_MAX_NUM_KEYS =
+    buildConf("spark.sql.optimizeNullAwareAntiJoin.maxNumKeys")
+      .internal()
+      .doc("The maximum number of keys that will be supported to use NAAJ 
optimize. " +
+        "While with NAAJ optimize, buildSide data would be expanded to 
(2^numKeys - 1) times, " +
+        "it might cause Driver OOM if NAAJ numKeys increased, since it is 
exponential growth. " +
+        "It's ok to increase this configuration if buildSide is small enough 
and safe enough " +
+        "to do such exponential expansion to gain performance improvement from 
O(M*N) to O(M).")
+      .intConf

Review comment:
       done




----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
[email protected]


