This is an automated email from the ASF dual-hosted git repository.
dkuzmenko pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git
The following commit(s) were added to refs/heads/master by this push:
new 0c8a5dfff1e HIVE-26578: Enable Iceberg storage format for materialized views (Krisztian Kasa, reviewed by Denys Kuzmenko)
0c8a5dfff1e is described below
commit 0c8a5dfff1e725f6e28d9817599d5eb410af5a62
Author: Krisztian Kasa <[email protected]>
AuthorDate: Mon Dec 12 09:29:51 2022 +0100
HIVE-26578: Enable Iceberg storage format for materialized views (Krisztian Kasa, reviewed by Denys Kuzmenko)
Closes #3823
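
The new mv_iceberg_orc2.q test below exercises the feature end to end. As a minimal sketch of the DDL this change enables (the table and view names are simply the ones used in that test), a materialized view can now itself be stored by the Iceberg storage handler:

    -- from mv_iceberg_orc2.q: both the source table and the MV are Iceberg/ORC
    create table tbl_ice(a int, b string, c int) stored by iceberg stored as orc tblproperties ('format-version'='1');

    create materialized view mat1 stored by iceberg stored as orc tblproperties ('format-version'='1') as
    select tbl_ice.b, tbl_ice.c from tbl_ice where tbl_ice.c > 52;

As the accompanying q.out shows, CBO can then rewrite a matching query to a scan of mat1.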
---
.../src/test/queries/positive/mv_iceberg_orc.q | 1 +
.../src/test/queries/positive/mv_iceberg_orc2.q | 24 +++
.../test/results/positive/mv_iceberg_orc2.q.out | 187 +++++++++++++++++++++
.../java/org/apache/hadoop/hive/ql/QTestUtil.java | 2 +-
.../view/create/CreateMaterializedViewDesc.java | 10 +-
.../org/apache/hadoop/hive/ql/exec/MoveTask.java | 9 +
.../hadoop/hive/ql/parse/SemanticAnalyzer.java | 14 +-
.../apache/hadoop/hive/ql/parse/TaskCompiler.java | 25 ++-
.../org/apache/hadoop/hive/ql/plan/PlanUtils.java | 8 +
.../metastore/api/hive_metastoreConstants.java | 3 +
10 files changed, 269 insertions(+), 14 deletions(-)
diff --git a/iceberg/iceberg-handler/src/test/queries/positive/mv_iceberg_orc.q b/iceberg/iceberg-handler/src/test/queries/positive/mv_iceberg_orc.q
index e040fc8e6fd..6ed6dca1e07 100644
--- a/iceberg/iceberg-handler/src/test/queries/positive/mv_iceberg_orc.q
+++ b/iceberg/iceberg-handler/src/test/queries/positive/mv_iceberg_orc.q
@@ -1,3 +1,4 @@
+-- MV source tables are iceberg tables but MV is not
-- SORT_QUERY_RESULTS
set hive.support.concurrency=true;
diff --git a/iceberg/iceberg-handler/src/test/queries/positive/mv_iceberg_orc2.q b/iceberg/iceberg-handler/src/test/queries/positive/mv_iceberg_orc2.q
new file mode 100644
index 00000000000..3241fa23956
--- /dev/null
+++ b/iceberg/iceberg-handler/src/test/queries/positive/mv_iceberg_orc2.q
@@ -0,0 +1,24 @@
+-- MV data is stored by iceberg
+-- SORT_QUERY_RESULTS
+
+set hive.explain.user=false;
+set hive.support.concurrency=true;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+
+drop materialized view if exists mat1;
+drop table if exists tbl_ice;
+
+create table tbl_ice(a int, b string, c int) stored by iceberg stored as orc tblproperties ('format-version'='1');
+insert into tbl_ice values (1, 'one', 50), (2, 'two', 51), (3, 'three', 52), (4, 'four', 53), (5, 'five', 54);
+
+explain
+create materialized view mat1 stored by iceberg stored as orc tblproperties ('format-version'='1') as
+select tbl_ice.b, tbl_ice.c from tbl_ice where tbl_ice.c > 52;
+
+create materialized view mat1 stored by iceberg stored as orc tblproperties ('format-version'='1') as
+select tbl_ice.b, tbl_ice.c from tbl_ice where tbl_ice.c > 52;
+
+select * from mat1;
+
+explain cbo
+select tbl_ice.b, tbl_ice.c from tbl_ice where tbl_ice.c > 52;
diff --git a/iceberg/iceberg-handler/src/test/results/positive/mv_iceberg_orc2.q.out b/iceberg/iceberg-handler/src/test/results/positive/mv_iceberg_orc2.q.out
new file mode 100644
index 00000000000..d01f254810b
--- /dev/null
+++ b/iceberg/iceberg-handler/src/test/results/positive/mv_iceberg_orc2.q.out
@@ -0,0 +1,187 @@
+PREHOOK: query: drop materialized view if exists mat1
+PREHOOK: type: DROP_MATERIALIZED_VIEW
+POSTHOOK: query: drop materialized view if exists mat1
+POSTHOOK: type: DROP_MATERIALIZED_VIEW
+PREHOOK: query: drop table if exists tbl_ice
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table if exists tbl_ice
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table tbl_ice(a int, b string, c int) stored by iceberg stored as orc tblproperties ('format-version'='1')
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@tbl_ice
+POSTHOOK: query: create table tbl_ice(a int, b string, c int) stored by iceberg stored as orc tblproperties ('format-version'='1')
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@tbl_ice
+PREHOOK: query: insert into tbl_ice values (1, 'one', 50), (2, 'two', 51), (3, 'three', 52), (4, 'four', 53), (5, 'five', 54)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@tbl_ice
+POSTHOOK: query: insert into tbl_ice values (1, 'one', 50), (2, 'two', 51), (3, 'three', 52), (4, 'four', 53), (5, 'five', 54)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@tbl_ice
+PREHOOK: query: explain
+create materialized view mat1 stored by iceberg stored as orc tblproperties ('format-version'='1') as
+select tbl_ice.b, tbl_ice.c from tbl_ice where tbl_ice.c > 52
+PREHOOK: type: CREATE_MATERIALIZED_VIEW
+PREHOOK: Input: default@tbl_ice
+PREHOOK: Output: database:default
+PREHOOK: Output: default@mat1
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: explain
+create materialized view mat1 stored by iceberg stored as orc tblproperties ('format-version'='1') as
+select tbl_ice.b, tbl_ice.c from tbl_ice where tbl_ice.c > 52
+POSTHOOK: type: CREATE_MATERIALIZED_VIEW
+POSTHOOK: Input: default@tbl_ice
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@mat1
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+STAGE DEPENDENCIES:
+ Stage-4 is a root stage
+ Stage-5 depends on stages: Stage-4
+ Stage-1 depends on stages: Stage-5
+ Stage-2 depends on stages: Stage-1
+ Stage-3 depends on stages: Stage-0, Stage-2
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-4
+ Create Materialized View
+ columns: b string, c int
+ table properties:
+ format-version 1
+ storage_handler org.apache.iceberg.mr.hive.HiveIcebergStorageHandler
+ expanded text: select `tbl_ice`.`b`, `tbl_ice`.`c` from `default`.`tbl_ice` where `tbl_ice`.`c` > 52
+ name: default.mat1
+ original text: select tbl_ice.b, tbl_ice.c from tbl_ice where tbl_ice.c > 52
+ rewrite enabled: true
+
+ Stage: Stage-5
+ Materialized View Update
+ name: default.mat1
+ retrieve and include: true
+
+ Stage: Stage-1
+ Tez
+#### A masked pattern was here ####
+ Edges:
+ Reducer 2 <- Map 1 (CUSTOM_SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: tbl_ice
+ filterExpr: (c > 52) (type: boolean)
+ Statistics: Num rows: 5 Data size: 460 Basic stats: COMPLETE Column stats: COMPLETE
+ Filter Operator
+ predicate: (c > 52) (type: boolean)
+ Statistics: Num rows: 3 Data size: 276 Basic stats: COMPLETE Column stats: COMPLETE
+ Select Operator
+ expressions: b (type: string), c (type: int)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 3 Data size: 276 Basic stats: COMPLETE Column stats: COMPLETE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 3 Data size: 276 Basic stats: COMPLETE Column stats: COMPLETE
+ table:
+ input format: org.apache.iceberg.mr.hive.HiveIcebergInputFormat
+ output format: org.apache.iceberg.mr.hive.HiveIcebergOutputFormat
+ serde: org.apache.iceberg.mr.hive.HiveIcebergSerDe
+ name: default.mat1
+ Select Operator
+ expressions: _col0 (type: string), _col1 (type: int)
+ outputColumnNames: col1, col2
+ Statistics: Num rows: 3 Data size: 276 Basic stats: COMPLETE Column stats: COMPLETE
+ Group By Operator
+ aggregations: max(length(col1)), avg(COALESCE(length(col1),0)), count(1), count(col1), compute_bit_vector_hll(col1), min(col2), max(col2), count(col2), compute_bit_vector_hll(col2)
+ minReductionHashAggr: 0.6666666
+ mode: hash
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8
+ Statistics: Num rows: 1 Data size: 400 Basic stats: COMPLETE Column stats: COMPLETE
+ Reduce Output Operator
+ null sort order:
+ sort order:
+ Statistics: Num rows: 1 Data size: 400 Basic stats: COMPLETE Column stats: COMPLETE
+ value expressions: _col0 (type: int), _col1 (type: struct<count:bigint,sum:double,input:int>), _col2 (type: bigint), _col3 (type: bigint), _col4 (type: binary), _col5 (type: int), _col6 (type: int), _col7 (type: bigint), _col8 (type: binary)
+ Execution mode: vectorized
+ Reducer 2
+ Execution mode: vectorized
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: max(VALUE._col0), avg(VALUE._col1), count(VALUE._col2), count(VALUE._col3), compute_bit_vector_hll(VALUE._col4), min(VALUE._col5), max(VALUE._col6), count(VALUE._col7), compute_bit_vector_hll(VALUE._col8)
+ mode: mergepartial
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8
+ Statistics: Num rows: 1 Data size: 332 Basic stats: COMPLETE Column stats: COMPLETE
+ Select Operator
+ expressions: 'STRING' (type: string), UDFToLong(COALESCE(_col0,0)) (type: bigint), COALESCE(_col1,0) (type: double), (_col2 - _col3) (type: bigint), COALESCE(ndv_compute_bit_vector(_col4),0) (type: bigint), _col4 (type: binary), 'LONG' (type: string), UDFToLong(_col5) (type: bigint), UDFToLong(_col6) (type: bigint), (_col2 - _col7) (type: bigint), COALESCE(ndv_compute_bit_vector(_col8),0) (type: bigint), _col8 (type: binary)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11
+ Statistics: Num rows: 1 Data size: 530 Basic stats: COMPLETE Column stats: COMPLETE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 530 Basic stats: COMPLETE Column stats: COMPLETE
+ table:
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-2
+ Dependency Collection
+
+ Stage: Stage-3
+ Stats Work
+ Basic Stats Work:
+ Column Stats Desc:
+ Columns: b, c
+ Column Types: string, int
+ Table: default.mat1
+
+ Stage: Stage-0
+ Move Operator
+ files:
+ hdfs directory: true
+ destination: hdfs://### HDFS PATH ###
+
+PREHOOK: query: create materialized view mat1 stored by iceberg stored as orc tblproperties ('format-version'='1') as
+select tbl_ice.b, tbl_ice.c from tbl_ice where tbl_ice.c > 52
+PREHOOK: type: CREATE_MATERIALIZED_VIEW
+PREHOOK: Input: default@tbl_ice
+PREHOOK: Output: database:default
+PREHOOK: Output: default@mat1
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: create materialized view mat1 stored by iceberg stored as orc tblproperties ('format-version'='1') as
+select tbl_ice.b, tbl_ice.c from tbl_ice where tbl_ice.c > 52
+POSTHOOK: type: CREATE_MATERIALIZED_VIEW
+POSTHOOK: Input: default@tbl_ice
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@mat1
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: Lineage: mat1.b SIMPLE [(tbl_ice)tbl_ice.FieldSchema(name:b, type:string, comment:null), ]
+POSTHOOK: Lineage: mat1.c SIMPLE [(tbl_ice)tbl_ice.FieldSchema(name:c, type:int, comment:null), ]
+PREHOOK: query: select * from mat1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@mat1
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: select * from mat1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@mat1
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+five 54
+four 53
+PREHOOK: query: explain cbo
+select tbl_ice.b, tbl_ice.c from tbl_ice where tbl_ice.c > 52
+PREHOOK: type: QUERY
+PREHOOK: Input: default@mat1
+PREHOOK: Input: default@tbl_ice
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: explain cbo
+select tbl_ice.b, tbl_ice.c from tbl_ice where tbl_ice.c > 52
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@mat1
+POSTHOOK: Input: default@tbl_ice
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+CBO PLAN:
+HiveTableScan(table=[[default, mat1]], table:alias=[default.mat1])
+
diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java b/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
index 533620166fb..120b74d8853 100644
--- a/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
+++ b/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
@@ -363,7 +363,7 @@ public class QTestUtil {
continue;
}
db.dropTable(dbName, tblName, true, true, fsType == FsType.ENCRYPTED_HDFS);
- HiveMaterializedViewsRegistry.get().dropMaterializedView(tblObj);
+ HiveMaterializedViewsRegistry.get().dropMaterializedView(tblObj.getDbName(), tblObj.getTableName());
}
}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/view/create/CreateMaterializedViewDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/view/create/CreateMaterializedViewDesc.java
index 7ab0589b71f..0d9ebff5274 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/view/create/CreateMaterializedViewDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/view/create/CreateMaterializedViewDesc.java
@@ -31,7 +31,7 @@ import org.apache.hadoop.hive.conf.Constants;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.TableType;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
-import org.apache.hadoop.hive.metastore.api.SourceTable;
+import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
import org.apache.hadoop.hive.ql.ddl.DDLDesc;
import org.apache.hadoop.hive.ql.ddl.DDLUtils;
import org.apache.hadoop.hive.ql.exec.Utilities;
@@ -237,11 +237,17 @@ public class CreateMaterializedViewDesc implements DDLDesc, Serializable {
this.tblProps = tblProps;
}
- @Explain(displayName = "table properties")
public Map<String, String> getTblProps() {
return tblProps;
}
+ @Explain(displayName = "table properties")
+ public Map<String, String> getTblPropsExplain() { // only for displaying plan
+ return PlanUtils.getPropertiesForExplain(tblProps,
+ hive_metastoreConstants.TABLE_IS_CTAS,
+ hive_metastoreConstants.TABLE_BUCKETING_VERSION);
+ }
+
@Explain(displayName = "if not exists", displayOnlyOnTrue = true)
public boolean getIfNotExists() {
return ifNotExists;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
index b50e48a68a7..a5a5ea793d6 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
@@ -37,6 +37,7 @@ import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
import org.apache.hadoop.hive.ql.ErrorMsg;
import org.apache.hadoop.hive.ql.ddl.DDLUtils;
import org.apache.hadoop.hive.ql.ddl.table.create.CreateTableDesc;
+import org.apache.hadoop.hive.ql.ddl.view.create.CreateMaterializedViewDesc;
import org.apache.hadoop.hive.ql.exec.mr.MapRedTask;
import org.apache.hadoop.hive.ql.exec.mr.MapredLocalTask;
import org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils;
@@ -1079,6 +1080,14 @@ public class MoveTask extends Task<MoveWork> implements Serializable {
commitProperties = new Properties();
commitProperties.put(hive_metastoreConstants.META_TABLE_NAME, createTableDesc.getDbTableName());
location = createTableDesc.getLocation();
+ } else {
+ CreateMaterializedViewDesc createViewDesc = moveWork.getLoadFileWork().getCreateViewDesc();
+ if (createViewDesc != null) {
+ storageHandlerClass = createViewDesc.getStorageHandler();
+ commitProperties = new Properties();
+ commitProperties.put(hive_metastoreConstants.META_TABLE_NAME, createViewDesc.getViewName());
+ location = createViewDesc.getLocation();
+ }
}
if (location != null) {
commitProperties.put(hive_metastoreConstants.META_TABLE_LOCATION, location);
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
index 4a331930d3c..75c62ca6008 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
@@ -7735,6 +7735,9 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
try {
if (tblDesc == null) {
if (viewDesc != null) {
+ if (viewDesc.getStorageHandler() != null) {
+ viewDesc.setLocation(getCtasOrCMVLocation(tblDesc, viewDesc, createTableUseSuffix).toString());
+ }
tableDescriptor = PlanUtils.getTableDesc(viewDesc, cols, colTypes);
} else if (qb.getIsQuery()) {
Class<? extends Deserializer> serdeClass = LazySimpleSerDe.class;
@@ -7765,13 +7768,6 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
throw new SemanticException(e);
}
-
- // if available, set location in table desc properties
- if (tblDesc != null && tblDesc.getLocation() != null && tableDescriptor != null &&
- !tableDescriptor.getProperties().containsKey(META_TABLE_LOCATION)) {
- tableDescriptor.getProperties().setProperty(META_TABLE_LOCATION, tblDesc.getLocation());
- }
-
// We need a specific rowObjectInspector in this case
try {
specificRowObjectInspector =
@@ -14214,6 +14210,10 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
}
tblProps = convertToAcidByDefault(storageFormat, dbDotTable, null, tblProps);
}
+ if (tblProps == null) {
+ tblProps = new HashMap<>();
+ }
+ tblProps.put(hive_metastoreConstants.TABLE_IS_CTAS, "true");
createVwDesc = new CreateMaterializedViewDesc(
dbDotTable, cols, comment, tblProps, partColNames, sortColNames, distributeColNames,
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java
index 3298156d6ba..3c37e9958fd 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java
@@ -145,6 +145,14 @@ public abstract class TaskCompiler {
throw new SemanticException("Failed to load storage handler: " + e.getMessage());
}
}
+ if (pCtx.getCreateViewDesc() != null && pCtx.getCreateViewDesc().getStorageHandler() != null) {
+ try {
+ directInsert =
+ HiveUtils.getStorageHandler(conf, pCtx.getCreateViewDesc().getStorageHandler()).directInsert();
+ } catch (HiveException e) {
+ throw new SemanticException("Failed to load storage handler: " + e.getMessage());
+ }
+ }
if (pCtx.getFetchTask() != null) {
if (pCtx.getFetchTask().getTblDesc() == null) {
@@ -304,18 +312,27 @@ public abstract class TaskCompiler {
}
if (directInsert) {
- Task<?> crtTask = null;
if (pCtx.getCreateTable() != null) {
CreateTableDesc crtTblDesc = pCtx.getCreateTable();
crtTblDesc.validate(conf);
- crtTask = TaskFactory.get(new DDLWork(inputs, outputs, crtTblDesc));
- }
- if (crtTask != null) {
+ Task<?> crtTask = TaskFactory.get(new DDLWork(inputs, outputs, crtTblDesc));
for (Task<?> rootTask : rootTasks) {
crtTask.addDependentTask(rootTask);
rootTasks.clear();
rootTasks.add(crtTask);
}
+ } else if (pCtx.getCreateViewDesc() != null) {
+ CreateMaterializedViewDesc createMaterializedViewDesc = pCtx.getCreateViewDesc();
+ Task<?> crtTask = TaskFactory.get(new DDLWork(inputs, outputs, createMaterializedViewDesc));
+ MaterializedViewUpdateDesc materializedViewUpdateDesc = new MaterializedViewUpdateDesc(
+ createMaterializedViewDesc.getViewName(), createMaterializedViewDesc.isRewriteEnabled(), false, false);
+ Task<?> updateTask = TaskFactory.get(new DDLWork(inputs, outputs, materializedViewUpdateDesc));
+ crtTask.addDependentTask(updateTask);
+ for (Task<?> rootTask : rootTasks) {
+ updateTask.addDependentTask(rootTask);
+ rootTasks.clear();
+ rootTasks.add(crtTask);
+ }
}
}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java
index 947250cd815..61d3da32a3f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java
@@ -18,6 +18,8 @@
package org.apache.hadoop.hive.ql.plan;
+import static org.apache.commons.lang3.StringUtils.isNotBlank;
+import static org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_LOCATION;
import static org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.TABLE_IS_CTAS;
import static org.apache.hive.common.util.HiveStringUtils.quoteComments;
@@ -355,6 +357,9 @@ public final class PlanUtils {
properties.setProperty(
org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_STORAGE,
crtTblDesc.getStorageHandler());
+ if (isNotBlank(crtTblDesc.getLocation())) {
+ properties.setProperty(META_TABLE_LOCATION, crtTblDesc.getLocation());
+ }
}
if (crtTblDesc.getCollItemDelim() != null) {
@@ -452,6 +457,9 @@ public final class PlanUtils {
properties.setProperty(
org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_STORAGE,
crtViewDesc.getStorageHandler());
+ if (isNotBlank(crtViewDesc.getLocation())) {
+ ret.getProperties().setProperty(META_TABLE_LOCATION, crtViewDesc.getLocation());
+ }
}
if (crtViewDesc.getViewName() != null) {
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/hive_metastoreConstants.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/hive_metastoreConstants.java
index 92884bdd9aa..38981623437 100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/hive_metastoreConstants.java
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/hive_metastoreConstants.java
@@ -73,6 +73,9 @@ package org.apache.hadoop.hive.metastore.api;
public static final java.lang.String JDBC_CONFIG_PREFIX = "hive.sql.";
+ /**
+ * Table is created via create table as select or create materialized view statement
+ */
public static final java.lang.String TABLE_IS_CTAS = "created_with_ctas";
public static final java.lang.String TABLE_IS_CTLT = "created_with_ctlt";