Repository: hive
Updated Branches:
  refs/heads/master fc5a5a636 -> 8cb99d680
HIVE-19633 : Remove/Migrate Minimr tests (Prasanth J via Sergey Shelukhin)

Signed-off-by: Ashutosh Chauhan <hashut...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/8cb99d68
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/8cb99d68
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/8cb99d68

Branch: refs/heads/master
Commit: 8cb99d680b1688ebfa617483f3010b217885ae90
Parents: fc5a5a6
Author: Prasanth Jayachandran <prasan...@apache.org>
Authored: Sun Jun 3 09:23:57 2018 -0700
Committer: Ashutosh Chauhan <hashut...@apache.org>
Committed: Sun Jun 3 09:23:57 2018 -0700

----------------------------------------------------------------------
 .../hadoop/hive/cli/TestMinimrCliDriver.java    |  2 ++
 .../test/resources/testconfiguration.properties | 27 +++------------
 itests/util/pom.xml                             |  5 +++
 .../hive/ql/hooks/VerifyNumReducersHook.java    | 25 +++++++++++---
 .../exec/tez/monitoring/TezProgressMonitor.java | 10 +++---
 .../clientpositive/root_dir_external_table.q    |  3 +-
 ql/src/test/queries/clientpositive/udf_using.q  |  8 ++---
 .../infer_bucket_sort_dyn_part.q.out            |  4 +--
 .../infer_bucket_sort_map_operators.q.out       |  8 ++---
 .../llap/bucket_num_reducers_acid.q.out         | 35 ++++++++++++++++++++
 .../clientpositive/parallel_orderby.q.out       | 10 +++---
 .../root_dir_external_table.q.out               | 28 ----------------
 .../results/clientpositive/scriptfile1.q.out    |  4 +--
 .../test/results/clientpositive/udf_using.q.out | 11 +++---
 14 files changed, 98 insertions(+), 82 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/8cb99d68/itests/qtest/src/test/java/org/apache/hadoop/hive/cli/TestMinimrCliDriver.java
----------------------------------------------------------------------
diff --git a/itests/qtest/src/test/java/org/apache/hadoop/hive/cli/TestMinimrCliDriver.java b/itests/qtest/src/test/java/org/apache/hadoop/hive/cli/TestMinimrCliDriver.java
index 417687c..f525671 100644
--- a/itests/qtest/src/test/java/org/apache/hadoop/hive/cli/TestMinimrCliDriver.java
+++ b/itests/qtest/src/test/java/org/apache/hadoop/hive/cli/TestMinimrCliDriver.java
@@ -23,6 +23,7 @@ import java.util.List;
 import org.apache.hadoop.hive.cli.control.CliAdapter;
 import org.apache.hadoop.hive.cli.control.CliConfigs;
 import org.junit.ClassRule;
+import org.junit.Ignore;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.TestRule;
@@ -54,6 +55,7 @@ public class TestMinimrCliDriver {
     this.qfile = qfile;
   }
 
+  @Ignore
   @Test
   public void testCliDriver() throws Exception {
     adapter.runTest(name, qfile);
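
For context on the change above: TestMinimrCliDriver is a parameterized qtest driver, with one generated test case per file in the minimr.query.files group, so a single @Ignore on testCliDriver() skips every remaining MiniMR case while the class itself stays compilable until it can be deleted. A minimal sketch of that pattern (illustrative names, not the generated driver code):

    import java.util.Arrays;
    import java.util.List;

    import org.junit.Ignore;
    import org.junit.Test;
    import org.junit.runner.RunWith;
    import org.junit.runners.Parameterized;

    @RunWith(Parameterized.class)
    public class IgnoredQFileDriverSketch {
      @Parameterized.Parameters(name = "{0}")
      public static List<Object[]> qfiles() {
        // one generated case per qfile listed in the configured group
        return Arrays.asList(new Object[][] {{"doesnotexist.q"}});
      }

      private final String qfile;

      public IgnoredQFileDriverSketch(String qfile) {
        this.qfile = qfile;
      }

      @Ignore // JUnit reports every parameterized case as skipped
      @Test
      public void testCliDriver() throws Exception {
        // would hand this.qfile to the CLI adapter here
      }
    }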

http://git-wip-us.apache.org/repos/asf/hive/blob/8cb99d68/itests/src/test/resources/testconfiguration.properties
----------------------------------------------------------------------
diff --git a/itests/src/test/resources/testconfiguration.properties b/itests/src/test/resources/testconfiguration.properties
index 6a70a4a..fa1a4fb 100644
--- a/itests/src/test/resources/testconfiguration.properties
+++ b/itests/src/test/resources/testconfiguration.properties
@@ -1,26 +1,7 @@
 # Note: the *.shared groups also run on TestCliDriver
-# NOTE: files should be listed in alphabetical order
-minimr.query.files=infer_bucket_sort_map_operators.q,\
-  infer_bucket_sort_dyn_part.q,\
-  infer_bucket_sort_merge.q,\
-  infer_bucket_sort_reducers_power_two.q,\
-  infer_bucket_sort_num_buckets.q,\
-  root_dir_external_table.q,\
-  parallel_orderby.q,\
-  udf_using.q,\
-  index_bitmap3.q,\
-  index_bitmap_auto.q,\
-  scriptfile1.q,\
-  bucket_num_reducers_acid.q,\
-  scriptfile1_win.q
-
-# These tests are disabled for minimr
-# ql_rewrite_gbtoidx.q,\
-# ql_rewrite_gbtoidx_cbo_1.q,\
-# ql_rewrite_gbtoidx_cbo_2.q,\
-# smb_mapjoin_8.q,\
-
+# DO NOT USE minimr, as MR is deprecated and MinimrCliDriver will be removed
+minimr.query.files=doesnotexist.q\
 
 
 # Tests that are not enabled for CLI Driver
 disabled.query.files=ql_rewrite_gbtoidx.q,\
@@ -37,7 +18,8 @@ disabled.query.files=ql_rewrite_gbtoidx.q,\
   union_stats.q,\
   sample2.q,\
   sample4.q,\
-  sample6.q
+  sample6.q,\
+  root_dir_external_table.q
 
 
 # NOTE: Add tests to minitez only if it is very
@@ -433,6 +415,7 @@ minillap.query.files=acid_bucket_pruning.q,\
   cttl.q
 
 minillaplocal.query.files=\
+  bucket_num_reducers_acid.q,\
   dp_counter_non_mm.q,\
   dp_counter_mm.q,\
   acid_no_buckets.q, \
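
A note on the properties format above: the trailing backslashes are java.util.Properties line continuations, so each group loads back as a single comma-separated string that the test framework then splits into qfile names (which is also why the emptied group still parses with its dangling "doesnotexist.q\"). A small hypothetical helper, not code from this patch, showing that parsing:

    import java.io.FileReader;
    import java.io.IOException;
    import java.util.Arrays;
    import java.util.Collections;
    import java.util.List;
    import java.util.Properties;

    public final class TestGroups {
      private TestGroups() {}

      // e.g. qfiles("testconfiguration.properties", "minillaplocal.query.files");
      // Properties.load() has already joined the backslash-continued lines.
      public static List<String> qfiles(String propsFile, String group) throws IOException {
        Properties props = new Properties();
        try (FileReader reader = new FileReader(propsFile)) {
          props.load(reader);
        }
        String value = props.getProperty(group, "").trim();
        if (value.isEmpty()) {
          return Collections.emptyList();
        }
        return Arrays.asList(value.split("\\s*,\\s*"));
      }
    }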

http://git-wip-us.apache.org/repos/asf/hive/blob/8cb99d68/itests/util/pom.xml
----------------------------------------------------------------------
diff --git a/itests/util/pom.xml b/itests/util/pom.xml
index 7b70560..9334f90 100644
--- a/itests/util/pom.xml
+++ b/itests/util/pom.xml
@@ -176,6 +176,11 @@
       <artifactId>hbase-mapreduce</artifactId>
       <version>${hbase.version}</version>
     </dependency>
+    <dependency>
+      <groupId>org.apache.tez</groupId>
+      <artifactId>tez-api</artifactId>
+      <version>${tez.version}</version>
+    </dependency>
     <!-- test inter-project -->
     <dependency>
       <groupId>junit</groupId>

http://git-wip-us.apache.org/repos/asf/hive/blob/8cb99d68/itests/util/src/main/java/org/apache/hadoop/hive/ql/hooks/VerifyNumReducersHook.java
----------------------------------------------------------------------
diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/ql/hooks/VerifyNumReducersHook.java b/itests/util/src/main/java/org/apache/hadoop/hive/ql/hooks/VerifyNumReducersHook.java
index 6172ca9..a9a25f8 100644
--- a/itests/util/src/main/java/org/apache/hadoop/hive/ql/hooks/VerifyNumReducersHook.java
+++ b/itests/util/src/main/java/org/apache/hadoop/hive/ql/hooks/VerifyNumReducersHook.java
@@ -21,9 +21,13 @@ import java.util.Map;
 
 import junit.framework.Assert;
 
+import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.MapRedStats;
+import org.apache.hadoop.hive.ql.exec.tez.monitoring.TezProgressMonitor;
 import org.apache.hadoop.hive.ql.session.SessionState;
 
+import org.apache.tez.dag.api.client.Progress;
+
 /**
  *
  * VerifyNumReducersHook.
@@ -42,10 +46,23 @@ public class VerifyNumReducersHook implements ExecuteWithHookContext {
     Assert.assertNotNull("SessionState returned null");
 
     int expectedReducers = hookContext.getConf().getInt(BUCKET_CONFIG, 0);
-    Map<String, MapRedStats> stats = ss.getMapRedStats();
-    Assert.assertEquals("Number of MapReduce jobs is incorrect", 1, stats.size());
+    if (ss.getConf().get(HiveConf.ConfVars.HIVE_EXECUTION_ENGINE.varname).equals("mr")) {
+      Map<String, MapRedStats> stats = ss.getMapRedStats();
+      Assert.assertEquals("Number of MapReduce jobs is incorrect", 1, stats.size());
 
-    MapRedStats stat = stats.values().iterator().next();
-    Assert.assertEquals("NumReducers is incorrect", expectedReducers, stat.getNumReduce());
+      MapRedStats stat = stats.values().iterator().next();
+      Assert.assertEquals("NumReducers is incorrect", expectedReducers, stat.getNumReduce());
+    } else if (ss.getConf().get(HiveConf.ConfVars.HIVE_EXECUTION_ENGINE.varname).equals("tez")) {
+      TezProgressMonitor tezProgressMonitor = (TezProgressMonitor) ss.getProgressMonitor();
+      Map<String, Progress> progressMap = tezProgressMonitor.getStatus().getVertexProgress();
+      int totalReducers = 0;
+      for (Map.Entry<String, Progress> entry : progressMap.entrySet()) {
+        // relying on the name of vertex is fragile, but this will do for now for the tests
+        if (entry.getKey().contains("Reducer")) {
+          totalReducers += entry.getValue().getTotalTaskCount();
+        }
+      }
+      Assert.assertEquals("Number of reducers is incorrect", expectedReducers, totalReducers);
+    }
   }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/8cb99d68/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/monitoring/TezProgressMonitor.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/monitoring/TezProgressMonitor.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/monitoring/TezProgressMonitor.java
index b0c1659..735442d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/monitoring/TezProgressMonitor.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/monitoring/TezProgressMonitor.java
@@ -35,12 +35,10 @@ import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
-import java.util.SortedSet;
-import java.util.TreeSet;
 
 import static org.apache.tez.dag.api.client.DAGStatus.State.KILLED;
 
-class TezProgressMonitor implements ProgressMonitor {
+public class TezProgressMonitor implements ProgressMonitor {
   private static final int COLUMN_1_WIDTH = 16;
   private final List<BaseWork> topSortedWork;
   private final SessionState.LogHelper console;
@@ -211,7 +209,7 @@ class TezProgressMonitor implements ProgressMonitor {
     return mode;
   }
 
-  static class VertexProgress {
+  public static class VertexProgress {
     private final int totalTaskCount;
     private final int succeededTaskCount;
     private final int failedTaskAttemptCount;
@@ -328,4 +326,8 @@ class TezProgressMonitor implements ProgressMonitor {
       return result;
     }
   }
+
+  public DAGStatus getStatus() {
+    return status;
+  }
 }
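
For readers following the hook change above: on Tez the reducer count is no longer available from MapRedStats, so VerifyNumReducersHook has to recover it from the DAG's per-vertex progress. That is why TezProgressMonitor and its new getStatus() accessor were made public and tez-api was added to itests/util. The counting logic, pulled out as a standalone sketch (the helper class and method name are mine; DAGStatus and Progress are the real Tez client types the patch uses):

    import java.util.Map;

    import org.apache.tez.dag.api.client.DAGStatus;
    import org.apache.tez.dag.api.client.Progress;

    public final class ReducerCounter {
      private ReducerCounter() {}

      /** Sums the task counts of all reducer vertices in a Tez DAG. */
      public static int countReducerTasks(DAGStatus status) {
        int total = 0;
        // Hive names Tez vertices "Map N" / "Reducer N"; matching on the vertex
        // name is fragile (the patch says as much) but good enough for tests.
        for (Map.Entry<String, Progress> e : status.getVertexProgress().entrySet()) {
          if (e.getKey().contains("Reducer")) {
            total += e.getValue().getTotalTaskCount();
          }
        }
        return total;
      }
    }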

http://git-wip-us.apache.org/repos/asf/hive/blob/8cb99d68/ql/src/test/queries/clientpositive/root_dir_external_table.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/root_dir_external_table.q b/ql/src/test/queries/clientpositive/root_dir_external_table.q
index 7763ce2..66a50e6 100644
--- a/ql/src/test/queries/clientpositive/root_dir_external_table.q
+++ b/ql/src/test/queries/clientpositive/root_dir_external_table.q
@@ -1,5 +1,6 @@
 --! qt:dataset:src
 set hive.mapred.mode=nonstrict;
+set hive.llap.io.enabled=false;
 
 dfs ${system:test.dfs.mkdir} hdfs:///tmp/test_root_dir_external_table;
 insert overwrite directory "hdfs:///tmp/test_root_dir_external_table" select key from src where (key < 20) order by key;
@@ -10,4 +11,4 @@ dfs -rmr hdfs:///tmp/test_root_dir_external_table;
 create external table roottable_n0 (key string) row format delimited fields terminated by '\\t' stored as textfile location 'hdfs:///';
 select count(*) from roottable_n0;
 
-dfs -rmr /000000_0;
\ No newline at end of file
+dfs -rmr /000000_0;

http://git-wip-us.apache.org/repos/asf/hive/blob/8cb99d68/ql/src/test/queries/clientpositive/udf_using.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/udf_using.q b/ql/src/test/queries/clientpositive/udf_using.q
index d5e9206..732f965 100644
--- a/ql/src/test/queries/clientpositive/udf_using.q
+++ b/ql/src/test/queries/clientpositive/udf_using.q
@@ -1,9 +1,9 @@
 --! qt:dataset:src
-dfs ${system:test.dfs.mkdir} hdfs:///tmp/udf_using;
+dfs ${system:test.dfs.mkdir} ${system:test.tmp.dir}/udf_using;
 
-dfs -copyFromLocal ../../data/files/sales.txt hdfs:///tmp/udf_using/sales.txt;
+dfs -copyFromLocal ../../data/files/sales.txt ${system:test.tmp.dir}/udf_using/sales.txt;
 
-create function lookup as 'org.apache.hadoop.hive.ql.udf.UDFFileLookup' using file 'hdfs:///tmp/udf_using/sales.txt';
+create function lookup as 'org.apache.hadoop.hive.ql.udf.UDFFileLookup' using file '${system:test.tmp.dir}/udf_using/sales.txt';
 
 create table udf_using (c1 string);
 insert overwrite table udf_using select 'Joe' from src limit 2;
@@ -13,4 +13,4 @@ select c1, lookup(c1) from udf_using;
 drop table udf_using;
 drop function lookup;
 
-dfs -rmr hdfs:///tmp/udf_using;
+dfs -rmr ${system:test.tmp.dir}/udf_using;
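
The udf_using.q rewrite above swaps the hard-coded hdfs:///tmp paths for ${system:test.tmp.dir}, which the test driver expands from a JVM system property, so the test no longer assumes a writable HDFS /tmp. Roughly how such a ${system:...} reference expands; a simplified sketch, not Hive's actual variable-substitution code:

    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    public final class SystemVarSubstitutionSketch {
      private static final Pattern SYSTEM_VAR = Pattern.compile("\\$\\{system:([^}]+)\\}");

      /** Expands ${system:name} from system properties; unknown references are left as-is. */
      public static String substitute(String command) {
        Matcher m = SYSTEM_VAR.matcher(command);
        StringBuffer sb = new StringBuffer();
        while (m.find()) {
          String value = System.getProperty(m.group(1), m.group(0));
          m.appendReplacement(sb, Matcher.quoteReplacement(value));
        }
        m.appendTail(sb);
        return sb.toString();
      }

      public static void main(String[] args) {
        System.setProperty("test.tmp.dir", "/build/ql/test/tmp");
        System.out.println(substitute("dfs -rmr ${system:test.tmp.dir}/udf_using;"));
        // prints: dfs -rmr /build/ql/test/tmp/udf_using;
      }
    }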

http://git-wip-us.apache.org/repos/asf/hive/blob/8cb99d68/ql/src/test/results/clientpositive/infer_bucket_sort_dyn_part.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/infer_bucket_sort_dyn_part.q.out b/ql/src/test/results/clientpositive/infer_bucket_sort_dyn_part.q.out
index 1af3ea5..8d813a4 100644
--- a/ql/src/test/results/clientpositive/infer_bucket_sort_dyn_part.q.out
+++ b/ql/src/test/results/clientpositive/infer_bucket_sort_dyn_part.q.out
@@ -488,7 +488,7 @@ STAGE PLANS:
     Move Operator
       files:
           hdfs directory: true
-          destination: hdfs://### HDFS PATH ###
+#### A masked pattern was here ####
 
   Stage: Stage-0
     Move Operator
@@ -529,7 +529,7 @@ STAGE PLANS:
     Move Operator
       files:
           hdfs directory: true
-          destination: hdfs://### HDFS PATH ###
+#### A masked pattern was here ####
 
   Stage: Stage-8
     Map Reduce

http://git-wip-us.apache.org/repos/asf/hive/blob/8cb99d68/ql/src/test/results/clientpositive/infer_bucket_sort_map_operators.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/infer_bucket_sort_map_operators.q.out b/ql/src/test/results/clientpositive/infer_bucket_sort_map_operators.q.out
index 63f6f5c..7b790cc 100644
--- a/ql/src/test/results/clientpositive/infer_bucket_sort_map_operators.q.out
+++ b/ql/src/test/results/clientpositive/infer_bucket_sort_map_operators.q.out
@@ -133,7 +133,7 @@ STAGE PLANS:
     Move Operator
       files:
           hdfs directory: true
-          destination: hdfs://### HDFS PATH ###
+#### A masked pattern was here ####
 
   Stage: Stage-0
     Move Operator
@@ -183,7 +183,7 @@ STAGE PLANS:
     Move Operator
       files:
           hdfs directory: true
-          destination: hdfs://### HDFS PATH ###
+#### A masked pattern was here ####
 
 PREHOOK: query: INSERT OVERWRITE TABLE test_table_out_n0 PARTITION (part = '1')
 SELECT key, count(*) FROM test_table1_n14 GROUP BY key
@@ -526,7 +526,7 @@ STAGE PLANS:
     Move Operator
       files:
           hdfs directory: true
-          destination: hdfs://### HDFS PATH ###
+#### A masked pattern was here ####
 
   Stage: Stage-0
     Move Operator
@@ -576,7 +576,7 @@ STAGE PLANS:
     Move Operator
       files:
           hdfs directory: true
-          destination: hdfs://### HDFS PATH ###
+#### A masked pattern was here ####
 
 PREHOOK: query: INSERT OVERWRITE TABLE test_table_out_n0 PARTITION (part = '1')
 SELECT /*+ MAPJOIN(a) */ a.key, b.value FROM test_table1_n14 a JOIN test_table2_n13 b ON a.key = b.key

http://git-wip-us.apache.org/repos/asf/hive/blob/8cb99d68/ql/src/test/results/clientpositive/llap/bucket_num_reducers_acid.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/bucket_num_reducers_acid.q.out b/ql/src/test/results/clientpositive/llap/bucket_num_reducers_acid.q.out
new file mode 100644
index 0000000..81ba5ae
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/bucket_num_reducers_acid.q.out
@@ -0,0 +1,35 @@
+PREHOOK: query: drop table if exists bucket_nr_acid
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table if exists bucket_nr_acid
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table bucket_nr_acid (a int, b int) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@bucket_nr_acid
+POSTHOOK: query: create table bucket_nr_acid (a int, b int) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@bucket_nr_acid
+PREHOOK: query: insert into bucket_nr_acid values(1,1)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@bucket_nr_acid
+PREHOOK: query: insert into bucket_nr_acid values(0,0),(3,3)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@bucket_nr_acid
+PREHOOK: query: update bucket_nr_acid set b = -1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@bucket_nr_acid
+PREHOOK: Output: default@bucket_nr_acid
+PREHOOK: query: select * from bucket_nr_acid order by a, b
+PREHOOK: type: QUERY
+PREHOOK: Input: default@bucket_nr_acid
+#### A masked pattern was here ####
+0	-1
+1	-1
+3	-1
+PREHOOK: query: drop table bucket_nr_acid
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@bucket_nr_acid
+PREHOOK: Output: default@bucket_nr_acid

http://git-wip-us.apache.org/repos/asf/hive/blob/8cb99d68/ql/src/test/results/clientpositive/parallel_orderby.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/parallel_orderby.q.out b/ql/src/test/results/clientpositive/parallel_orderby.q.out
index 73a103d..d60a0a6 100644
--- a/ql/src/test/results/clientpositive/parallel_orderby.q.out
+++ b/ql/src/test/results/clientpositive/parallel_orderby.q.out
@@ -68,7 +68,7 @@ STAGE PLANS:
     Move Operator
       files:
           hdfs directory: true
-          destination: hdfs://### HDFS PATH ###
+#### A masked pattern was here ####
 
   Stage: Stage-3
       Create Table Operator:
@@ -134,11 +134,11 @@ Storage Desc Params:
 PREHOOK: query: select * from total_ordered
 PREHOOK: type: QUERY
 PREHOOK: Input: default@total_ordered
-PREHOOK: Output: hdfs://### HDFS PATH ###
+#### A masked pattern was here ####
 POSTHOOK: query: select * from total_ordered
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@total_ordered
-POSTHOOK: Output: hdfs://### HDFS PATH ###
+#### A masked pattern was here ####
 128	val_128
 128	val_128
 150	val_150
@@ -245,11 +245,11 @@ Storage Desc Params:
 PREHOOK: query: select * from total_ordered
 PREHOOK: type: QUERY
 PREHOOK: Input: default@total_ordered
-PREHOOK: Output: hdfs://### HDFS PATH ###
+#### A masked pattern was here ####
 POSTHOOK: query: select * from total_ordered
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@total_ordered
-POSTHOOK: Output: hdfs://### HDFS PATH ###
+#### A masked pattern was here ####
 128	val_128
 128	val_128
 150	val_150

http://git-wip-us.apache.org/repos/asf/hive/blob/8cb99d68/ql/src/test/results/clientpositive/root_dir_external_table.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/root_dir_external_table.q.out b/ql/src/test/results/clientpositive/root_dir_external_table.q.out
deleted file mode 100644
index 7049406..0000000
--- a/ql/src/test/results/clientpositive/root_dir_external_table.q.out
+++ /dev/null
@@ -1,28 +0,0 @@
-PREHOOK: query: insert overwrite directory "hdfs://### HDFS PATH ###" select key from src where (key < 20) order by key
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-#### A masked pattern was here ####
-POSTHOOK: query: insert overwrite directory "hdfs://### HDFS PATH ###" select key from src where (key < 20) order by key
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-#### A masked pattern was here ####
-PREHOOK: query: create external table roottable_n0 (key string) row format delimited fields terminated by '\\t' stored as textfile location 'hdfs://### HDFS PATH ###'
-PREHOOK: type: CREATETABLE
-PREHOOK: Input: hdfs://### HDFS PATH ###
-PREHOOK: Output: database:default
-PREHOOK: Output: default@roottable_n0
-POSTHOOK: query: create external table roottable_n0 (key string) row format delimited fields terminated by '\\t' stored as textfile location 'hdfs://### HDFS PATH ###'
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Input: hdfs://### HDFS PATH ###
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@roottable_n0
-PREHOOK: query: select count(*) from roottable_n0
-PREHOOK: type: QUERY
-PREHOOK: Input: default@roottable_n0
-PREHOOK: Output: hdfs://### HDFS PATH ###
-POSTHOOK: query: select count(*) from roottable_n0
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@roottable_n0
-POSTHOOK: Output: hdfs://### HDFS PATH ###
-20
-#### A masked pattern was here ####

http://git-wip-us.apache.org/repos/asf/hive/blob/8cb99d68/ql/src/test/results/clientpositive/scriptfile1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/scriptfile1.q.out b/ql/src/test/results/clientpositive/scriptfile1.q.out
index f374501..29ee2df 100644
--- a/ql/src/test/results/clientpositive/scriptfile1.q.out
+++ b/ql/src/test/results/clientpositive/scriptfile1.q.out
@@ -31,11 +31,11 @@ POSTHOOK: Lineage: dest1_n22.value SCRIPT [(src)src.FieldSchema(name:key, type:s
 PREHOOK: query: SELECT dest1_n22.* FROM dest1_n22
 PREHOOK: type: QUERY
 PREHOOK: Input: default@dest1_n22
-PREHOOK: Output: hdfs://### HDFS PATH ###
+#### A masked pattern was here ####
 POSTHOOK: query: SELECT dest1_n22.* FROM dest1_n22
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@dest1_n22
-POSTHOOK: Output: hdfs://### HDFS PATH ###
+#### A masked pattern was here ####
 10	val_10
 100	val_100
 100	val_100

http://git-wip-us.apache.org/repos/asf/hive/blob/8cb99d68/ql/src/test/results/clientpositive/udf_using.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/udf_using.q.out b/ql/src/test/results/clientpositive/udf_using.q.out
index 8f92101..a226b79 100644
--- a/ql/src/test/results/clientpositive/udf_using.q.out
+++ b/ql/src/test/results/clientpositive/udf_using.q.out
@@ -1,13 +1,12 @@
-PREHOOK: query: create function lookup as 'org.apache.hadoop.hive.ql.udf.UDFFileLookup' using file 'hdfs://### HDFS PATH ###'
+#### A masked pattern was here ####
 PREHOOK: type: CREATEFUNCTION
 PREHOOK: Output: database:default
 PREHOOK: Output: default.lookup
-PREHOOK: Output: hdfs://### HDFS PATH ###
-POSTHOOK: query: create function lookup as 'org.apache.hadoop.hive.ql.udf.UDFFileLookup' using file 'hdfs://### HDFS PATH ###'
+#### A masked pattern was here ####
 POSTHOOK: type: CREATEFUNCTION
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default.lookup
-POSTHOOK: Output: hdfs://### HDFS PATH ###
+#### A masked pattern was here ####
 PREHOOK: query: create table udf_using (c1 string)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
@@ -28,11 +27,11 @@ POSTHOOK: Lineage: udf_using.c1 SIMPLE []
 PREHOOK: query: select c1, lookup(c1) from udf_using
 PREHOOK: type: QUERY
 PREHOOK: Input: default@udf_using
-PREHOOK: Output: hdfs://### HDFS PATH ###
+#### A masked pattern was here ####
 POSTHOOK: query: select c1, lookup(c1) from udf_using
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@udf_using
-POSTHOOK: Output: hdfs://### HDFS PATH ###
+#### A masked pattern was here ####
 Joe	2
 Joe	2
 PREHOOK: query: drop table udf_using