[ https://issues.apache.org/jira/browse/HIVE-12971?focusedWorklogId=280841&page=com.atlassian.jira.plugin.system.issuetabpanels:worklog-tabpanel#worklog-280841 ]

ASF GitHub Bot logged work on HIVE-12971:
-----------------------------------------

                Author: ASF GitHub Bot
            Created on: 23/Jul/19 06:56
            Start Date: 23/Jul/19 06:56
    Worklog Time Spent: 10m 
      Work Description: jcamachor commented on pull request #733: HIVE-12971: Add Support for Kudu Tables
URL: https://github.com/apache/hive/pull/733#discussion_r306145974
 
 

 ##########
 File path: kudu-handler/src/java/org/apache/hadoop/hive/kudu/KuduPredicateHandler.java
 ##########
 @@ -0,0 +1,175 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.kudu;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.common.type.HiveDecimal;
+import org.apache.hadoop.hive.common.type.Timestamp;
+import org.apache.hadoop.hive.ql.exec.SerializationUtilities;
+import org.apache.hadoop.hive.ql.index.IndexPredicateAnalyzer;
+import org.apache.hadoop.hive.ql.index.IndexSearchCondition;
+import org.apache.hadoop.hive.ql.metadata.HiveStoragePredicateHandler.DecomposedPredicate;
+import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
+import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc;
+import org.apache.hadoop.hive.ql.plan.TableScanDesc;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDF;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPAnd;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPEqual;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPEqualOrGreaterThan;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPEqualOrLessThan;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPGreaterThan;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPLessThan;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPNotNull;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPNull;
+import org.apache.kudu.ColumnSchema;
+import org.apache.kudu.Schema;
+import org.apache.kudu.client.KuduPredicate;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * Contains static methods for decomposing predicate/filter expressions and
+ * getting the equivalent Kudu predicates.
+ */
+public final class KuduPredicateHandler {
+  static final Logger LOG = LoggerFactory.getLogger(KuduPredicateHandler.class);
+
+  private KuduPredicateHandler() {}
+
+  /**
+   * Analyzes the predicates and return the portion of it which
+   * cannot be evaluated by Kudu during table access.
+   *
+   * @param predicateExpr predicate to be decomposed
+   * @param schema the schema of the Kudu table
+   * @return decomposed form of predicate, or null if no pushdown is possible at all
+   */
+  public static DecomposedPredicate decompose(ExprNodeDesc predicateExpr, Schema schema) {
+    IndexPredicateAnalyzer analyzer = newAnalyzer(schema);
+    List<IndexSearchCondition> sConditions = new ArrayList<>();
+    ExprNodeDesc residualPredicate = analyzer.analyzePredicate(predicateExpr, sConditions);
+
+    // Nothing to decompose.
+    if (sConditions.size() == 0) {
+      return null;
+    }
+
+    DecomposedPredicate decomposedPredicate = new DecomposedPredicate();
+    decomposedPredicate.pushedPredicate = analyzer.translateSearchConditions(sConditions);
+    decomposedPredicate.residualPredicate = (ExprNodeGenericFuncDesc) residualPredicate;
+    return decomposedPredicate;
+  }
+
+  /**
+   * Returns the list of Kudu predicates from the passed configuration.
+   *
+   * @param conf the execution configuration
+   * @param schema the schema of the Kudu table
+   * @return the list of Kudu predicates
+   */
+  public static List<KuduPredicate> getPredicates(Configuration conf, Schema schema) {
+    List<KuduPredicate> predicates = new ArrayList<>();
+    for (IndexSearchCondition sc : getSearchConditions(conf, schema)) {
+      predicates.add(conditionToPredicate(sc, schema));
+    }
+    return predicates;
+  }
+
+  private static List<IndexSearchCondition> getSearchConditions(Configuration conf, Schema schema) {
+    List<IndexSearchCondition> conditions = new ArrayList<>();
+    ExprNodeDesc filterExpr = getExpression(conf);
+    if (null == filterExpr) {
+      return conditions;
+    }
+    IndexPredicateAnalyzer analyzer = newAnalyzer(schema);
+    ExprNodeDesc residual = analyzer.analyzePredicate(filterExpr, conditions);
+
+    // There should be no residual since we already negotiated that earlier in
+    // decomposePredicate. However, with hive.optimize.index.filter
+    // OpProcFactory#pushFilterToStorageHandler pushes the original filter back down again.
+    // Since pushed-down filters are not omitted at the higher levels (and thus the
+    // contract of negotiation is ignored anyway), just ignore the residuals.
+    // Re-assess this when negotiation is honored and the duplicate evaluation is removed.
+    if (residual != null) {
+      LOG.debug("Ignoring residual predicate " + residual.getExprString());
+    }
+
+    return conditions;
+  }
+
+  private static ExprNodeDesc getExpression(Configuration conf) {
+    String filteredExprSerialized = conf.get(TableScanDesc.FILTER_EXPR_CONF_STR);
+    if (filteredExprSerialized == null) {
+      return null;
+    }
+    return SerializationUtilities.deserializeExpression(filteredExprSerialized);
+  }
+
+  private static KuduPredicate conditionToPredicate(IndexSearchCondition condition, Schema schema) {
+    ColumnSchema column = schema.getColumn(condition.getColumnDesc().getColumn());
+    GenericUDF genericUDF = condition.getOriginalExpr().getGenericUDF();
+    Object value = condition.getConstantDesc().getValue();
+
+    // Convert special Hive types.
+    if (value instanceof HiveDecimal) {
+      value = ((HiveDecimal) value).bigDecimalValue();
+    } else if (value instanceof Timestamp) {
+      value = ((Timestamp) value).toSqlTimestamp();
 
 Review comment:
   I am not sure whether Kudu uses `local datetime` or `instant` semantics for timestamps (Hive uses `local datetime` for TIMESTAMP and `instant` for TIMESTAMP WITH LOCAL TIME ZONE; see https://cwiki.apache.org/confluence/display/Hive/Different+TIMESTAMP+types).
   Note that this method just creates a `java.sql.Timestamp` holding millis since the UTC epoch, i.e., calling its `toString` method may not give you the correct String representation, depending on the system timezone (you need to use a formatter).
   Hive tests run in PST. To verify that filtering on these values works properly, insert timestamp values into `kudu_queries.q` that are less than 8 hours apart (in the current tests, one value is in `2011` and the other in `2012`, which would not expose this issue even if it exists). A sketch of the pitfall follows below.
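   As a minimal standalone sketch of that `toString` pitfall (not part of the patch; the class name and the sample instant are made up for illustration, with the JVM default zone forced to PST the way the Hive tests run):
   ```java
   import java.sql.Timestamp;
   import java.time.Instant;
   import java.time.ZoneOffset;
   import java.time.format.DateTimeFormatter;
   import java.util.TimeZone;

   public class TimestampToStringDemo {
     public static void main(String[] args) {
       // Force the JVM default zone to PST, as in the Hive test environment.
       TimeZone.setDefault(TimeZone.getTimeZone("America/Los_Angeles"));

       // An arbitrary instant: 2012-01-01 00:00:00 UTC.
       Timestamp ts = new Timestamp(Instant.parse("2012-01-01T00:00:00Z").toEpochMilli());

       // toString renders in the JVM default zone and prints
       // "2011-12-31 16:00:00.0" -- an 8-hour shift back across the year boundary.
       System.out.println(ts);

       // A formatter pinned to UTC prints "2012-01-01 00:00:00" instead.
       DateTimeFormatter utcFormat =
           DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss").withZone(ZoneOffset.UTC);
       System.out.println(utcFormat.format(ts.toInstant()));
     }
   }
   ```
   The same 8-hour shift is why test values less than 8 hours apart matter: the shift can move one value past the other and silently change the filter result, while values a year apart stay in order either way.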
 
----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
[email protected]


Issue Time Tracking
-------------------

    Worklog Id:     (was: 280841)
    Time Spent: 1h 40m  (was: 1.5h)

> Hive Support for Kudu
> ---------------------
>
>                 Key: HIVE-12971
>                 URL: https://issues.apache.org/jira/browse/HIVE-12971
>             Project: Hive
>          Issue Type: New Feature
>    Affects Versions: 2.0.0
>            Reporter: Lenni Kuff
>            Assignee: Grant Henke
>            Priority: Major
>              Labels: pull-request-available
>         Attachments: HIVE-12971.0.patch, HIVE-12971.1.patch, 
> HIVE-12971.2.patch, HIVE-12971.3.patch
>
>          Time Spent: 1h 40m
>  Remaining Estimate: 0h
>
> JIRA for tracking work related to Hive/Kudu integration.
> It would be useful to allow Kudu data to be accessible via Hive. This would 
> involve creating a Kudu SerDe/StorageHandler and implementing support for 
> QUERY and DML commands like SELECT, INSERT, UPDATE, and DELETE. Kudu 
> Input/OutputFormats classes already exist. The work can be staged to support 
> this functionality incrementally.



--
This message was sent by Atlassian JIRA
(v7.6.14#76016)
