sohami commented on a change in pull request #1334: DRILL-6385: Support JPPD feature
URL: https://github.com/apache/drill/pull/1334#discussion_r210052850
 
 

 ##########
 File path: exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/filter/RuntimeFilterRecordBatch.java
 ##########
 @@ -0,0 +1,230 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.physical.impl.filter;
+
+import org.apache.drill.common.exceptions.UserException;
+import org.apache.drill.common.expression.ExpressionPosition;
+import org.apache.drill.common.expression.LogicalExpression;
+import org.apache.drill.common.expression.PathSegment;
+import org.apache.drill.common.expression.SchemaPath;
+import org.apache.drill.exec.exception.OutOfMemoryException;
+import org.apache.drill.exec.exception.SchemaChangeException;
+import org.apache.drill.exec.expr.ValueVectorReadExpression;
+import org.apache.drill.exec.expr.fn.impl.ValueVectorHashHelper;
+import org.apache.drill.exec.ops.FragmentContext;
+import org.apache.drill.exec.physical.config.Filter;
+import org.apache.drill.exec.record.AbstractSingleRecordBatch;
+import org.apache.drill.exec.record.BatchSchema.SelectionVectorMode;
+import org.apache.drill.exec.record.RecordBatch;
+import org.apache.drill.exec.record.TransferPair;
+import org.apache.drill.exec.record.TypedFieldId;
+import org.apache.drill.exec.record.VectorWrapper;
+import org.apache.drill.exec.record.selection.SelectionVector2;
+import org.apache.drill.exec.record.selection.SelectionVector4;
+import org.apache.drill.exec.work.filter.BloomFilter;
+import org.apache.drill.exec.work.filter.RuntimeFilterWritable;
+import java.util.ArrayList;
+import java.util.BitSet;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * A RuntimeFilterRecordBatch steps over the ScanBatch. If the ScanBatch participates
+ * in the HashJoinBatch and a RuntimeFilter can be applied to it, this batch generates
+ * a filtered SV2; otherwise it generates an identity SV2 whose recordCount equals the
+ * originalRecordCount, which does not affect the query's performance and merely incurs
+ * a memory transfer in the later RemovingRecordBatch operator.
+ */
+public class RuntimeFilterRecordBatch extends AbstractSingleRecordBatch<Filter> {
+  private SelectionVector2 sv2;
+
+  private ValueVectorHashHelper.Hash64 hash64;
+  private Map<String, Integer> field2id = new HashMap<>();
+  private List<String> toFilterFields;
+  private int originalRecordCount;
+  private int recordCount;
+  private static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(RuntimeFilterRecordBatch.class);
+
+  public RuntimeFilterRecordBatch(Filter pop, RecordBatch incoming, FragmentContext context) throws OutOfMemoryException {
+    super(pop, context, incoming);
+  }
+
+  @Override
+  public FragmentContext getContext() {
+    return context;
+  }
+
+  @Override
+  public int getRecordCount() {
+    return sv2.getCount();
+  }
+
+  @Override
+  public SelectionVector2 getSelectionVector2() {
+    return sv2;
+  }
+
+  @Override
+  public SelectionVector4 getSelectionVector4() {
+    return null;
+  }
+
+  @Override
+  protected IterOutcome doWork() {
+    container.transferIn(incoming.getContainer());
+    originalRecordCount = incoming.getRecordCount();
+    sv2.setOriginalRecordCount(originalRecordCount);
+    try {
+      applyRuntimeFilter();
+    } catch (SchemaChangeException e) {
+      throw new UnsupportedOperationException(e);
+    }
+    return getFinalOutcome(false);
+  }
+
+  @Override
+  public void close() {
+    if (sv2 != null) {
+      sv2.clear();
+    }
+    super.close();
+  }
+
+  @Override
+  protected boolean setupNewSchema() throws SchemaChangeException {
+    if (sv2 != null) {
+      sv2.clear();
+    }
+
 
 Review comment:
   You need to call `container.clear()` here: if `clear` is not called, then in 
cases where a column is dropped and a new column is added, the old column's 
vectors will still remain in the container. Also, since this batch will send 
`OK_NEW_SCHEMA` downstream, all the other operators will do the same thing and 
refresh their references to the input ValueVectors in their `incoming` record 
batch. A sketch of the suggested placement is below.
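
   For concreteness, a minimal sketch of the suggested placement. The body 
after `container.clear()` is illustrative only, since the diff above is 
truncated at that point; `oContext` and `callBack` are assumed to be the 
fields inherited from `AbstractRecordBatch`.

   ```java
     @Override
     protected boolean setupNewSchema() throws SchemaChangeException {
       if (sv2 != null) {
         sv2.clear();
       }
       // Suggested fix: release the old vectors before rebuilding the schema,
       // so a column dropped by the schema change does not linger in the
       // container next to the newly added one.
       container.clear();

       // Illustrative continuation (not shown in the diff): allocate the SV2
       // on first use and mirror the incoming vectors into the output
       // container under the new schema.
       if (sv2 == null) {
         sv2 = new SelectionVector2(oContext.getAllocator());
       }
       for (final VectorWrapper<?> wrapper : incoming) {
         container.addOrGet(wrapper.getField(), callBack);
       }
       container.buildSchema(SelectionVectorMode.TWO_BYTE);
       return true;
     }
   ```

   Downstream operators that receive `OK_NEW_SCHEMA` will then rebuild their 
own containers against the refreshed vectors, so the stale references 
disappear along the whole chain.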

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services
