HAWQ-964. Remove old HBaseLogic, typed Partition Comparison
Project: http://git-wip-us.apache.org/repos/asf/incubator-hawq/repo Commit: http://git-wip-us.apache.org/repos/asf/incubator-hawq/commit/55db7ecd Tree: http://git-wip-us.apache.org/repos/asf/incubator-hawq/tree/55db7ecd Diff: http://git-wip-us.apache.org/repos/asf/incubator-hawq/diff/55db7ecd Branch: refs/heads/HAWQ-964 Commit: 55db7ecd0c3a7ffc365a1c850b5500053d92e4f4 Parents: 8f73c2b Author: Kavinder Dhaliwal <[email protected]> Authored: Tue Oct 4 15:17:49 2016 -0700 Committer: Kavinder Dhaliwal <[email protected]> Committed: Tue Oct 4 15:17:49 2016 -0700 ---------------------------------------------------------------------- .../pxf/plugins/hbase/HBaseFilterBuilder.java | 13 +--- .../hawq/pxf/plugins/hive/HiveAccessor.java | 68 +++++++++++++++++++- 2 files changed, 68 insertions(+), 13 deletions(-) ---------------------------------------------------------------------- http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/55db7ecd/pxf/pxf-hbase/src/main/java/org/apache/hawq/pxf/plugins/hbase/HBaseFilterBuilder.java ---------------------------------------------------------------------- diff --git a/pxf/pxf-hbase/src/main/java/org/apache/hawq/pxf/plugins/hbase/HBaseFilterBuilder.java b/pxf/pxf-hbase/src/main/java/org/apache/hawq/pxf/plugins/hbase/HBaseFilterBuilder.java index 29c8686..5ec0652 100644 --- a/pxf/pxf-hbase/src/main/java/org/apache/hawq/pxf/plugins/hbase/HBaseFilterBuilder.java +++ b/pxf/pxf-hbase/src/main/java/org/apache/hawq/pxf/plugins/hbase/HBaseFilterBuilder.java @@ -240,18 +240,7 @@ public class HBaseFilterBuilder implements FilterParser.FilterBuilder { * Currently, 1, 2 can occur, since no parenthesis are used. 
*/ private Filter handleCompoundOperations(FilterParser.LogicalOperation opId, Filter left, Filter right) { - FilterList result; - - if (left instanceof FilterList) { - result = (FilterList) left; - result.addFilter(right); - - return result; - } - - result = new FilterList(logicalOperatorsMap.get(opId), new Filter[] {left, right}); - - return result; + return new FilterList(logicalOperatorsMap.get(opId), new Filter[] {left, right}); } /** http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/55db7ecd/pxf/pxf-hive/src/main/java/org/apache/hawq/pxf/plugins/hive/HiveAccessor.java ---------------------------------------------------------------------- diff --git a/pxf/pxf-hive/src/main/java/org/apache/hawq/pxf/plugins/hive/HiveAccessor.java b/pxf/pxf-hive/src/main/java/org/apache/hawq/pxf/plugins/hive/HiveAccessor.java index b2b3e4b..8cff706 100644 --- a/pxf/pxf-hive/src/main/java/org/apache/hawq/pxf/plugins/hive/HiveAccessor.java +++ b/pxf/pxf-hive/src/main/java/org/apache/hawq/pxf/plugins/hive/HiveAccessor.java @@ -19,9 +19,12 @@ package org.apache.hawq.pxf.plugins.hive; * under the License. 
*/ +import org.apache.hadoop.hive.common.type.HiveDecimal; +import org.apache.hadoop.hive.serde.serdeConstants; import org.apache.hawq.pxf.api.BasicFilter; import org.apache.hawq.pxf.api.FilterParser; import org.apache.hawq.pxf.api.LogicalFilter; +import org.apache.hawq.pxf.api.UnsupportedTypeException; import org.apache.hawq.pxf.api.utilities.ColumnDescriptor; import org.apache.hawq.pxf.api.utilities.InputData; import org.apache.hawq.pxf.plugins.hdfs.HdfsSplittableDataAccessor; @@ -33,10 +36,16 @@ import org.apache.hadoop.mapred.JobConf; import org.apache.hadoop.mapred.Reporter; import java.io.IOException; +import java.sql.Date; +import java.sql.Timestamp; import java.util.Arrays; import java.util.LinkedList; import java.util.List; +import static org.apache.hawq.pxf.api.io.DataType.*; +import static org.apache.hawq.pxf.api.io.DataType.BPCHAR; +import static org.apache.hawq.pxf.api.io.DataType.BYTEA; + /** * Accessor for Hive tables. The accessor will open and read a split belonging * to a Hive table. 
Opening a split means creating the corresponding InputFormat @@ -247,7 +256,16 @@ public class HiveAccessor extends HdfsSplittableDataAccessor { * the filter field matches a partition field, but the values do * not match */ - return filterValue.equals(partition.val); + boolean keepPartition = filterValue.equals(partition.val); + + /* + * If the string comparison fails then we should check the comparison of + * the two operands as typed values + */ + if (!keepPartition && !partition.val.equals("__HIVE_DEFAULT_PARTITION__")){ + keepPartition = testFilterByType(filterValue, partition); + } + return keepPartition; } } @@ -261,6 +279,54 @@ public class HiveAccessor extends HdfsSplittableDataAccessor { } return partitionAllowed; } + + /* + * Given two values in String form and their type, convert each to the same type and do an equality check + */ + private boolean testFilterByType(String filterValue, HivePartition partition) { + boolean result; + switch (partition.type) { + case serdeConstants.BOOLEAN_TYPE_NAME: + result = Boolean.valueOf(filterValue).equals(Boolean.valueOf(partition.val)); + break; + case serdeConstants.TINYINT_TYPE_NAME: + case serdeConstants.SMALLINT_TYPE_NAME: + result = (Short.parseShort(filterValue) == Short.parseShort(partition.val)); + break; + case serdeConstants.INT_TYPE_NAME: + result = (Integer.parseInt(filterValue) == Integer.parseInt(partition.val)); + break; + case serdeConstants.BIGINT_TYPE_NAME: + result = (Long.parseLong(filterValue) == Long.parseLong(partition.val)); + break; + case serdeConstants.FLOAT_TYPE_NAME: + result = (Float.parseFloat(filterValue) == Float.parseFloat(partition.val)); + break; + case serdeConstants.DOUBLE_TYPE_NAME: + result = (Double.parseDouble(filterValue) == Double.parseDouble(partition.val)); + break; + case serdeConstants.TIMESTAMP_TYPE_NAME: + result = Timestamp.valueOf(filterValue).equals(Timestamp.valueOf(partition.val)); + break; + case serdeConstants.DATE_TYPE_NAME: + result = 
Date.valueOf(filterValue).equals(Date.valueOf(partition.val)); + break; + case serdeConstants.DECIMAL_TYPE_NAME: + result = HiveDecimal.create(filterValue).bigDecimalValue().equals(HiveDecimal.create(partition.val).bigDecimalValue()); + break; + case serdeConstants.BINARY_TYPE_NAME: + result = filterValue.getBytes().equals(partition.val.getBytes()); + break; + case serdeConstants.STRING_TYPE_NAME: + case serdeConstants.VARCHAR_TYPE_NAME: + case serdeConstants.CHAR_TYPE_NAME: + default: + result = false; + } + + return result; + } + /* * We are testing one filter against all the partition fields. The filter * has the form "fieldA = valueA". The partitions have the form
