[ 
https://issues.apache.org/jira/browse/HIVE-23716?focusedWorklogId=461637&page=com.atlassian.jira.plugin.system.issuetabpanels:worklog-tabpanel#worklog-461637
 ]

ASF GitHub Bot logged work on HIVE-23716:
-----------------------------------------

                Author: ASF GitHub Bot
            Created on: 21/Jul/20 15:19
            Start Date: 21/Jul/20 15:19
    Worklog Time Spent: 10m 
      Work Description: pgaref commented on a change in pull request #1147:
URL: https://github.com/apache/hive/pull/1147#discussion_r458180264



##########
File path: 
ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinAntiJoinStringOperator.java
##########
@@ -0,0 +1,371 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.exec.vector.mapjoin;
+
+import org.apache.hadoop.hive.ql.CompilationOpContext;
+import org.apache.hadoop.hive.ql.exec.JoinUtil;
+import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector;
+import org.apache.hadoop.hive.ql.exec.vector.VectorizationContext;
+import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
+import org.apache.hadoop.hive.ql.exec.vector.expressions.StringExpr;
+import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpression;
+import 
org.apache.hadoop.hive.ql.exec.vector.mapjoin.hashtable.VectorMapJoinBytesHashSet;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.ql.plan.OperatorDesc;
+import org.apache.hadoop.hive.ql.plan.VectorDesc;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.Arrays;
+
+// Single-Column String hash table import.
+// Single-Column String specific imports.
+
+// TODO : Duplicate codes need to merge with semi join.
+/*
+ * Specialized class for doing a vectorized map join that is an anti join on a 
Single-Column String
+ * using a hash set.
+ */
+public class VectorMapJoinAntiJoinStringOperator extends 
VectorMapJoinAntiJoinGenerateResultOperator {
+
+  private static final long serialVersionUID = 1L;
+
+  
//------------------------------------------------------------------------------------------------
+
+  private static final String CLASS_NAME = 
VectorMapJoinAntiJoinStringOperator.class.getName();
+  private static final Logger LOG = LoggerFactory.getLogger(CLASS_NAME);
+
+  protected String getLoggingPrefix() {
+    return super.getLoggingPrefix(CLASS_NAME);
+  }
+
+  
//------------------------------------------------------------------------------------------------
+
+  // (none)
+
+  // The above members are initialized by the constructor and must not be
+  // transient.
+  //---------------------------------------------------------------------------
+
+  // The hash map for this specialized class.
+  private transient VectorMapJoinBytesHashSet hashSet;
+
+  //---------------------------------------------------------------------------
+  // Single-Column String specific members.
+  //
+
+  // The column number for this one column join specialization.
+  private transient int singleJoinColumn;
+
+  //---------------------------------------------------------------------------
+  // Pass-thru constructors.
+  //
+
+  /** Kryo ctor. */
+  protected VectorMapJoinAntiJoinStringOperator() {
+    super();
+  }
+
+  public VectorMapJoinAntiJoinStringOperator(CompilationOpContext ctx) {
+    super(ctx);
+  }
+
+  public VectorMapJoinAntiJoinStringOperator(CompilationOpContext ctx, 
OperatorDesc conf,
+                                             VectorizationContext vContext, 
VectorDesc vectorDesc) throws HiveException {
+    super(ctx, conf, vContext, vectorDesc);
+  }
+
+  //---------------------------------------------------------------------------
+  // Process Single-Column String anti Join on a vectorized row batch.
+  //
+
+  @Override
+  protected void commonSetup() throws HiveException {
+    super.commonSetup();
+
+    /*
+     * Initialize Single-Column String members for this specialized class.
+     */
+
+    singleJoinColumn = bigTableKeyColumnMap[0];
+  }
+
+  @Override
+  public void hashTableSetup() throws HiveException {
+    super.hashTableSetup();
+
+    /*
+     * Get our Single-Column String hash set information for this specialized 
class.
+     */
+
+    hashSet = (VectorMapJoinBytesHashSet) vectorMapJoinHashTable;
+  }
+
+  @Override
+  public void processBatch(VectorizedRowBatch batch) throws HiveException {
+
+    try {
+
+      // Do the per-batch setup for an anti join.
+
+      // (Currently none)
+      // antiPerBatchSetup(batch);
+
+      // For anti joins, we may apply the filter(s) now.
+      for(VectorExpression ve : bigTableFilterExpressions) {
+        ve.evaluate(batch);
+      }
+
+      final int inputLogicalSize = batch.size;
+      if (inputLogicalSize == 0) {
+        return;
+      }
+
+      // Perform any key expressions.  Results will go into scratch columns.
+      if (bigTableKeyExpressions != null) {
+        for (VectorExpression ve : bigTableKeyExpressions) {
+          ve.evaluate(batch);
+        }
+      }
+
+      /*
+       * Single-Column String specific declarations.
+       */
+
+      // The one join column for this specialized class.
+      BytesColumnVector joinColVector = (BytesColumnVector) 
batch.cols[singleJoinColumn];
+      byte[][] vector = joinColVector.vector;
+      int[] start = joinColVector.start;
+      int[] length = joinColVector.length;
+
+      /*
+       * Single-Column Long check for repeating.
+       */
+
+      // Check single column for repeating.
+      boolean allKeyInputColumnsRepeating = joinColVector.isRepeating;
+
+      if (allKeyInputColumnsRepeating) {
+
+        /*
+         * Repeating.
+         */
+
+        // All key input columns are repeating.  Generate key once.  Lookup 
once.
+        // Since the key is repeated, we must use entry 0 regardless of 
selectedInUse.
+
+        /*
+         * Single-Column String specific repeated lookup.
+         */
+
+        JoinUtil.JoinResult joinResult;
+        if (!joinColVector.noNulls && joinColVector.isNull[0]) {
+          joinResult = JoinUtil.JoinResult.MATCH;
+        } else {
+          byte[] keyBytes = vector[0];
+          int keyStart = start[0];
+          int keyLength = length[0];
+          joinResult = hashSet.contains(keyBytes, keyStart, keyLength, 
hashSetResults[0]);
+          if (joinResult == JoinUtil.JoinResult.NOMATCH) {
+            joinResult = JoinUtil.JoinResult.MATCH;
+          } else if (joinResult == JoinUtil.JoinResult.MATCH) {
+            joinResult = JoinUtil.JoinResult.NOMATCH;
+          }
+        }
+
+        /*
+         * Common repeated join result processing.
+         */
+
+        if (LOG.isDebugEnabled()) {
+          LOG.debug(CLASS_NAME + " batch #" + batchCounter + " repeated 
joinResult " + joinResult.name());
+        }
+        finishAntiRepeated(batch, joinResult, hashSetResults[0]);
+      } else {
+
+        /*
+         * NOT Repeating.
+         */
+
+        if (LOG.isDebugEnabled()) {
+          LOG.debug(CLASS_NAME + " batch #" + batchCounter + " non-repeated");
+        }
+
+        // We remember any matching rows in matchs / matchSize.  At the end of 
the loop,
+        // selected / batch.size will represent both matching and non-matching 
rows for outer join.
+        // Only deferred rows will have been removed from selected.
+        int selected[] = batch.selected;
+        boolean selectedInUse = batch.selectedInUse;
+
+        int hashSetResultCount = 0;
+        int allMatchCount = 0;
+        int spillCount = 0;
+
+        /*
+         * Single-Column String specific variables.
+         */
+
+        int saveKeyBatchIndex = -1;
+
+        // We optimize performance by only looking up the first key in a 
series of equal keys.
+        boolean haveSaveKey = false;
+        JoinUtil.JoinResult saveJoinResult = JoinUtil.JoinResult.NOMATCH;
+
+        // Logical loop over the rows in the batch since the batch may have 
selected in use.
+        for (int logical = 0; logical < inputLogicalSize; logical++) {
+          int batchIndex = (selectedInUse ? selected[logical] : logical);
+
+          /*
+           * Single-Column String get key.
+           */
+
+          // Implicit -- use batchIndex.
+          boolean isNull = !joinColVector.noNulls && 
joinColVector.isNull[batchIndex];
+
+          /*
+           * Equal key series checking.
+           */
+
+          if (isNull || !haveSaveKey ||
+              !StringExpr.equal(vector[saveKeyBatchIndex], 
start[saveKeyBatchIndex], length[saveKeyBatchIndex],
+                      vector[batchIndex], start[batchIndex], 
length[batchIndex])) {
+
+            // New key.
+
+            if (haveSaveKey) {
+              // Move on with our counts.
+              switch (saveJoinResult) {
+              case MATCH:
+                // We have extracted the existence from the hash set result, 
so we don't keep it.
+                break;
+              case SPILL:
+                // We keep the hash set result for its spill information.
+                hashSetResultCount++;
+                break;
+              case NOMATCH:
+                break;
+              }
+            }
+
+            if (isNull) {
+              saveJoinResult = JoinUtil.JoinResult.NOMATCH;
+              haveSaveKey = false;
+            } else {
+              // Regardless of our matching result, we keep that information 
to make multiple use
+              // of it for a possible series of equal keys.
+              haveSaveKey = true;
+  
+              /*
+               * Single-Column String specific save key and lookup.
+               */
+  
+              saveKeyBatchIndex = batchIndex;
+  
+              /*
+               * Single-Column String specific lookup key.
+               */
+  
+              byte[] keyBytes = vector[batchIndex];
+              int keyStart = start[batchIndex];
+              int keyLength = length[batchIndex];
+              saveJoinResult = hashSet.contains(keyBytes, keyStart, keyLength, 
hashSetResults[hashSetResultCount]);
+              if (saveJoinResult == JoinUtil.JoinResult.NOMATCH) {

Review comment:
       Inversion func




----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


Issue Time Tracking
-------------------

    Worklog Id:     (was: 461637)
    Time Spent: 4h 40m  (was: 4.5h)

> Support Anti Join in Hive 
> --------------------------
>
>                 Key: HIVE-23716
>                 URL: https://issues.apache.org/jira/browse/HIVE-23716
>             Project: Hive
>          Issue Type: Bug
>            Reporter: mahesh kumar behera
>            Assignee: mahesh kumar behera
>            Priority: Major
>              Labels: pull-request-available
>         Attachments: HIVE-23716.01.patch
>
>          Time Spent: 4h 40m
>  Remaining Estimate: 0h
>
> Currently hive does not support Anti join. The query for anti join is 
> converted to left outer join and null filter on right side join key is added 
> to get the desired result. This is causing
>  # Extra computation — The left outer join projects the redundant columns 
> from the right side. Along with that, filtering is done to remove the redundant 
> rows. This can be avoided in the case of anti join, as the anti join will project 
> only the required columns and rows from the left side table.
>  # Extra shuffle — In the case of anti join, the duplicate records moved to the join 
> node can be avoided at the child node. This can reduce a significant amount 
> of data movement if the number of distinct rows (join keys) is significant.
>  # Extra Memory Usage - In the case of a map-based anti join, a hash set is 
> sufficient, as just the key is required to check if the records match the 
> join condition. In the case of a left join, we need the key and the non-key 
> columns as well, and thus a hash table is required.
> For a query like
> {code:java}
>  select wr_order_number FROM web_returns LEFT JOIN web_sales  ON 
> wr_order_number = ws_order_number WHERE ws_order_number IS NULL;{code}
> The number of distinct ws_order_number in web_sales table in a typical 10TB 
> TPCDS set up is just 10% of total records. So when we convert this query to 
> anti join, instead of 7 billion rows, only 600 million rows are moved to join 
> node.
> In the current patch, just one conversion is done. The pattern of 
> project->filter->left-join is converted to project->anti-join. This will take 
> care of subqueries with a “not exists” clause. The queries with “not exists” 
> are converted first to filter + left-join and then converted to an anti 
> join. The queries with “not in” are not handled in the current patch.
> From the execution side, both merge join and map join with vectorized execution 
> are supported for anti join.



--
This message was sent by Atlassian Jira
(v8.3.4#803005)

Reply via email to