HIVE-11846: CliDriver shutdown tries to drop index table again which was already dropped when dropping the original table (Pengcheng Xiong, reviewed by Ashutosh Chauhan)
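Summary of the fix: QTestUtil's per-test cleanup iterates over all tables and looks each one up in the metastore before dropping it. When a base table with an index had already been dropped, the lookup of the now-gone index table threw InvalidTableException and aborted CliDriver shutdown. The patch catches that exception, logs a warning, and skips the table. Below is a self-contained sketch of the same pattern; the wrapper class and method name are illustrative only and not part of the patch, which applies the guard inside QTestUtil's existing table-cleanup loop (see the diff that follows).

import org.apache.hadoop.hive.ql.metadata.Hive;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.metadata.InvalidTableException;
import org.apache.hadoop.hive.ql.metadata.Table;

// Illustrative helper, not part of the patch: drop every table in dbName,
// tolerating tables that have already disappeared.
public final class DropTablesSketch {
  public static void dropAllTables(Hive db, String dbName) throws HiveException {
    for (String tblName : db.getAllTables(dbName)) {
      Table tblObj;
      try {
        tblObj = db.getTable(tblName);
      } catch (InvalidTableException e) {
        // Already gone, e.g. an index table dropped together with its base
        // table by a test; skip it instead of failing the whole cleanup.
        continue;
      }
      if (tblObj.isIndexTable()) {
        // Index tables cannot be dropped directly; dropping the base table
        // removes them automatically.
        continue;
      }
      db.dropTable(dbName, tblName);
    }
  }
}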
Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/68c0e999
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/68c0e999
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/68c0e999

Branch: refs/heads/beeline-cli
Commit: 68c0e9993c6aee85d50ce1dc8974916b1e073f67
Parents: b934a80
Author: Pengcheng Xiong <[email protected]>
Authored: Fri Sep 18 10:29:52 2015 -0700
Committer: Pengcheng Xiong <[email protected]>
Committed: Fri Sep 18 10:29:52 2015 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/hive/ql/QTestUtil.java    |   9 +-
 .../clientpositive/drop_table_with_index.q      |  35 +++++
 .../clientpositive/drop_table_with_index.q.out  | 152 +++++++++++++++++++
 3 files changed, 195 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/68c0e999/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
----------------------------------------------------------------------
diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java b/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
index 3fae0ba..f23bf2b 100644
--- a/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
+++ b/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
@@ -84,6 +84,7 @@ import org.apache.hadoop.hive.ql.exec.spark.session.SparkSessionManagerImpl;
 import org.apache.hadoop.hive.ql.lockmgr.zookeeper.CuratorFrameworkSingleton;
 import org.apache.hadoop.hive.ql.lockmgr.zookeeper.ZooKeeperHiveLockManager;
 import org.apache.hadoop.hive.ql.metadata.Hive;
+import org.apache.hadoop.hive.ql.metadata.InvalidTableException;
 import org.apache.hadoop.hive.ql.metadata.Table;
 import org.apache.hadoop.hive.ql.parse.ASTNode;
 import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer;
@@ -670,7 +671,13 @@ public class QTestUtil {
       SessionState.get().setCurrentDatabase(dbName);
       for (String tblName : db.getAllTables()) {
         if (!DEFAULT_DATABASE_NAME.equals(dbName) || !srcTables.contains(tblName)) {
-          Table tblObj = db.getTable(tblName);
+          Table tblObj = null;
+          try {
+            tblObj = db.getTable(tblName);
+          } catch (InvalidTableException e) {
+            LOG.warn("Trying to drop table " + e.getTableName() + ". But it does not exist.");
+            continue;
+          }
           // dropping index table can not be dropped directly. Dropping the base
           // table will automatically drop all its index table
           if(tblObj.isIndexTable()) {


http://git-wip-us.apache.org/repos/asf/hive/blob/68c0e999/ql/src/test/queries/clientpositive/drop_table_with_index.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/drop_table_with_index.q b/ql/src/test/queries/clientpositive/drop_table_with_index.q
new file mode 100644
index 0000000..1790664
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/drop_table_with_index.q
@@ -0,0 +1,35 @@
+set hive.stats.dbclass=fs;
+set hive.stats.autogather=true;
+set hive.cbo.enable=true;
+
+DROP TABLE IF EXISTS aa;
+CREATE TABLE aa (L_ORDERKEY INT,
+                 L_PARTKEY INT,
+                 L_SUPPKEY INT,
+                 L_LINENUMBER INT,
+                 L_QUANTITY DOUBLE,
+                 L_EXTENDEDPRICE DOUBLE,
+                 L_DISCOUNT DOUBLE,
+                 L_TAX DOUBLE,
+                 L_RETURNFLAG STRING,
+                 L_LINESTATUS STRING,
+                 l_shipdate STRING,
+                 L_COMMITDATE STRING,
+                 L_RECEIPTDATE STRING,
+                 L_SHIPINSTRUCT STRING,
+                 L_SHIPMODE STRING,
+                 L_COMMENT STRING)
+ROW FORMAT DELIMITED
+FIELDS TERMINATED BY '|';
+
+LOAD DATA LOCAL INPATH '../../data/files/lineitem.txt' OVERWRITE INTO TABLE aa;
+
+CREATE INDEX aa_lshipdate_idx ON TABLE aa(l_shipdate) AS 'org.apache.hadoop.hive.ql.index.AggregateIndexHandler' WITH DEFERRED REBUILD IDXPROPERTIES("AGGREGATES"="count(l_shipdate)");
+ALTER INDEX aa_lshipdate_idx ON aa REBUILD;
+
+show tables;
+
+explain select l_shipdate, count(l_shipdate)
+from aa
+group by l_shipdate;
+


http://git-wip-us.apache.org/repos/asf/hive/blob/68c0e999/ql/src/test/results/clientpositive/drop_table_with_index.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/drop_table_with_index.q.out b/ql/src/test/results/clientpositive/drop_table_with_index.q.out
new file mode 100644
index 0000000..d1b0d6d
--- /dev/null
+++ b/ql/src/test/results/clientpositive/drop_table_with_index.q.out
@@ -0,0 +1,152 @@
+PREHOOK: query: DROP TABLE IF EXISTS aa
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: DROP TABLE IF EXISTS aa
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: CREATE TABLE aa (L_ORDERKEY INT,
+                 L_PARTKEY INT,
+                 L_SUPPKEY INT,
+                 L_LINENUMBER INT,
+                 L_QUANTITY DOUBLE,
+                 L_EXTENDEDPRICE DOUBLE,
+                 L_DISCOUNT DOUBLE,
+                 L_TAX DOUBLE,
+                 L_RETURNFLAG STRING,
+                 L_LINESTATUS STRING,
+                 l_shipdate STRING,
+                 L_COMMITDATE STRING,
+                 L_RECEIPTDATE STRING,
+                 L_SHIPINSTRUCT STRING,
+                 L_SHIPMODE STRING,
+                 L_COMMENT STRING)
+ROW FORMAT DELIMITED
+FIELDS TERMINATED BY '|'
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@aa
+POSTHOOK: query: CREATE TABLE aa (L_ORDERKEY INT,
+                 L_PARTKEY INT,
+                 L_SUPPKEY INT,
+                 L_LINENUMBER INT,
+                 L_QUANTITY DOUBLE,
+                 L_EXTENDEDPRICE DOUBLE,
+                 L_DISCOUNT DOUBLE,
+                 L_TAX DOUBLE,
+                 L_RETURNFLAG STRING,
+                 L_LINESTATUS STRING,
+                 l_shipdate STRING,
+                 L_COMMITDATE STRING,
+                 L_RECEIPTDATE STRING,
+                 L_SHIPINSTRUCT STRING,
+                 L_SHIPMODE STRING,
+                 L_COMMENT STRING)
+ROW FORMAT DELIMITED
+FIELDS TERMINATED BY '|'
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@aa
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/lineitem.txt' OVERWRITE INTO TABLE aa
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@aa
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/lineitem.txt' OVERWRITE INTO TABLE aa
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@aa
+PREHOOK: query: CREATE INDEX aa_lshipdate_idx ON TABLE aa(l_shipdate) AS 'org.apache.hadoop.hive.ql.index.AggregateIndexHandler' WITH DEFERRED REBUILD IDXPROPERTIES("AGGREGATES"="count(l_shipdate)")
+PREHOOK: type: CREATEINDEX
+PREHOOK: Input: default@aa
+POSTHOOK: query: CREATE INDEX aa_lshipdate_idx ON TABLE aa(l_shipdate) AS 'org.apache.hadoop.hive.ql.index.AggregateIndexHandler' WITH DEFERRED REBUILD IDXPROPERTIES("AGGREGATES"="count(l_shipdate)")
+POSTHOOK: type: CREATEINDEX
+POSTHOOK: Input: default@aa
+POSTHOOK: Output: default@default__aa_aa_lshipdate_idx__
+PREHOOK: query: ALTER INDEX aa_lshipdate_idx ON aa REBUILD
+PREHOOK: type: ALTERINDEX_REBUILD
+PREHOOK: Input: default@aa
+PREHOOK: Output: default@default__aa_aa_lshipdate_idx__
+POSTHOOK: query: ALTER INDEX aa_lshipdate_idx ON aa REBUILD
+POSTHOOK: type: ALTERINDEX_REBUILD
+POSTHOOK: Input: default@aa
+POSTHOOK: Output: default@default__aa_aa_lshipdate_idx__
+POSTHOOK: Lineage: default__aa_aa_lshipdate_idx__._bucketname SIMPLE [(aa)aa.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ]
+POSTHOOK: Lineage: default__aa_aa_lshipdate_idx__._count_of_l_shipdate EXPRESSION [(aa)aa.FieldSchema(name:l_shipdate, type:string, comment:null), ]
+POSTHOOK: Lineage: default__aa_aa_lshipdate_idx__._offsets EXPRESSION [(aa)aa.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ]
+POSTHOOK: Lineage: default__aa_aa_lshipdate_idx__.l_shipdate SIMPLE [(aa)aa.FieldSchema(name:l_shipdate, type:string, comment:null), ]
+PREHOOK: query: show tables
+PREHOOK: type: SHOWTABLES
+PREHOOK: Input: database:default
+POSTHOOK: query: show tables
+POSTHOOK: type: SHOWTABLES
+POSTHOOK: Input: database:default
+aa
+alltypesorc
+cbo_t1
+cbo_t2
+cbo_t3
+default__aa_aa_lshipdate_idx__
+lineitem
+part
+src
+src1
+src_cbo
+src_json
+src_sequencefile
+src_thrift
+srcbucket
+srcbucket2
+srcpart
+PREHOOK: query: explain select l_shipdate, count(l_shipdate)
+from aa
+group by l_shipdate
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select l_shipdate, count(l_shipdate)
+from aa
+group by l_shipdate
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: aa
+            Statistics: Num rows: 120 Data size: 12099 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: l_shipdate (type: string)
+              outputColumnNames: l_shipdate
+              Statistics: Num rows: 120 Data size: 12099 Basic stats: COMPLETE Column stats: NONE
+              Group By Operator
+                aggregations: count(l_shipdate)
+                keys: l_shipdate (type: string)
+                mode: hash
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 120 Data size: 12099 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: string)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: string)
+                  Statistics: Num rows: 120 Data size: 12099 Basic stats: COMPLETE Column stats: NONE
+                  value expressions: _col1 (type: bigint)
+      Reduce Operator Tree:
+        Group By Operator
+          aggregations: count(VALUE._col0)
+          keys: KEY._col0 (type: string)
+          mode: mergepartial
+          outputColumnNames: _col0, _col1
+          Statistics: Num rows: 60 Data size: 6049 Basic stats: COMPLETE Column stats: NONE
+          File Output Operator
+            compressed: false
+            Statistics: Num rows: 60 Data size: 6049 Basic stats: COMPLETE Column stats: NONE
+            table:
+                input format: org.apache.hadoop.mapred.TextInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
