IMPALA-1654: General partition exprs in DDL operations.
This commit handles partition-related DDL in a more general way. Compound
predicates can now specify a set of partitions in statements such as ALTER
TABLE DROP PARTITION and COMPUTE INCREMENTAL STATS. Statements that operate
on a single partition at a time, such as ALTER TABLE ... PARTITION ... SET
LOCATION and LOAD DATA, now verify that only one partition is specified.
ALTER TABLE ADD PARTITION continues to use the old PartitionKeyValue logic.
The changed partition-related DDL statements are as follows.
Table: p (i int) partitioned by (j int, k string)
Partitions:
+-------+---+-------+--------+------+--------------+-------------------+
| j | k | #Rows | #Files | Size | Bytes Cached | Cache Replication |
+-------+---+-------+--------+------+--------------+-------------------+
| 1 | a | -1 | 0 | 0B | NOT CACHED | NOT CACHED |
| 1 | b | -1 | 0 | 0B | NOT CACHED | NOT CACHED |
| 1 | c | -1 | 0 | 0B | NOT CACHED | NOT CACHED |
| 2 | d | -1 | 0 | 0B | NOT CACHED | NOT CACHED |
| 2 | e | -1 | 0 | 0B | NOT CACHED | NOT CACHED |
| 2 | f | -1 | 0 | 0B | NOT CACHED | NOT CACHED |
| Total | | -1 | 0 | 0B | 0B | |
+-------+---+-------+--------+------+--------------+-------------------+
1. show files in p partition (j<2, k='a');
2. alter table p partition (j<2, k in ("b","c")) set cached in 'testPool';
// j can appear more than once:
3.1. alter table p partition (j<2, j>0, k<>"d") set uncached;
// it is the same as
3.2. alter table p partition (j<2 and j>0, not k="e") set uncached;
// we can also do 'or'
3.3. alter table p partition (j<2 or j>0, k like "%") set uncached;
// missing 'k' matches all values of k
4. alter table p partition (j<2) set fileformat textfile;
5. alter table p partition (k rlike ".*") set serdeproperties ("k"="v");
6. alter table p partition (j is not null) set tblproperties ("k"="v");
7. alter table p drop partition (j<2);
8. compute incremental stats p partition(j<2);
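For example, against the partitions listed above, statement 7 collapses what
previously required one fully specified DROP per partition. A sketch of the
equivalence (illustrative, not taken from the commit itself):
// old syntax: one fully specified partition per statement
alter table p drop partition (j=1, k="a");
alter table p drop partition (j=1, k="b");
alter table p drop partition (j=1, k="c");
// new syntax: a single predicate matching all three partitions
alter table p drop partition (j<2);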
The partition-related DDL statements that keep the old partition-spec syntax are as follows.
1. load data inpath '/path/from' into table p partition (j=2, k="d");
2. alter table p add partition (j=2, k="g");
3. alter table p partition (j=2, k="g") set location '/path/to';
4. insert into p partition (j=2, k="g") values (1), (2), (3);
With general partition expressions or partially specified partition specs,
a partition predicate may match an empty set of partitions, regardless of
whether 'IF EXISTS' is specified.
Examples:
[localhost.localdomain:21000] > alter table p drop partition (j=2, k="f");
Query: alter table p drop partition (j=2, k="f")
+-------------------------+
| summary |
+-------------------------+
| Dropped 1 partition(s). |
+-------------------------+
Fetched 1 row(s) in 0.78s
[localhost.localdomain:21000] > alter table p drop partition (j=2, k<"f");
Query: alter table p drop partition (j=2, k<"f")
+-------------------------+
| summary |
+-------------------------+
| Dropped 2 partition(s). |
+-------------------------+
Fetched 1 row(s) in 0.41s
[localhost.localdomain:21000] > alter table p drop partition (k="a");
Query: alter table p drop partition (k="a")
+-------------------------+
| summary |
+-------------------------+
| Dropped 1 partition(s). |
+-------------------------+
Fetched 1 row(s) in 0.25s
[localhost.localdomain:21000] > show partitions p;
Query: show partitions p
+-------+---+-------+--------+------+--------------+-------------------+
| j | k | #Rows | #Files | Size | Bytes Cached | Cache Replication |
+-------+---+-------+--------+------+--------------+-------------------+
| 1 | b | -1 | 0 | 0B | NOT CACHED | NOT CACHED |
| 1 | c | -1 | 0 | 0B | NOT CACHED | NOT CACHED |
| Total | | -1 | 0 | 0B | 0B | |
+-------+---+-------+--------+------+--------------+-------------------+
Fetched 3 row(s) in 0.01s
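Two more sketches (illustrative; output not captured from a real shell). A
predicate that matches no partitions still succeeds even without 'IF EXISTS':
[localhost.localdomain:21000] > alter table p drop partition (j>10);
+-------------------------+
| summary                 |
+-------------------------+
| Dropped 0 partition(s). |
+-------------------------+
And single-partition statements such as SET LOCATION reject predicates that
match more than one partition (here j=1 matches both remaining partitions),
failing in analysis with an error along the lines of:
[localhost.localdomain:21000] > alter table p partition (j=1) set location '/path/to';
ERROR: AnalysisException: Partition expr in set location statements can only
match one partition. Too many matched partitions j=1/k=b,j=1/k=c .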
Change-Id: I2c9162fcf9d227b8daf4c2e761d57bab4e26408f
Reviewed-on: http://gerrit.cloudera.org:8080/3942
Reviewed-by: Alex Behm <[email protected]>
Tested-by: Internal Jenkins
Project: http://git-wip-us.apache.org/repos/asf/incubator-impala/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-impala/commit/628685ae
Tree: http://git-wip-us.apache.org/repos/asf/incubator-impala/tree/628685ae
Diff: http://git-wip-us.apache.org/repos/asf/incubator-impala/diff/628685ae
Branch: refs/heads/master
Commit: 628685ae749daa133c9b7a6184c5523136979974
Parents: 3f2f008
Author: Amos Bird <[email protected]>
Authored: Wed Nov 25 06:20:15 2015 -0800
Committer: Internal Jenkins <[email protected]>
Committed: Tue Nov 15 03:27:36 2016 +0000
----------------------------------------------------------------------
be/src/service/query-exec-state.cc | 19 +-
be/src/service/query-exec-state.h | 1 +
common/thrift/CatalogService.thrift | 2 +-
common/thrift/Frontend.thrift | 6 +-
common/thrift/JniCatalog.thrift | 24 +-
fe/src/main/cup/sql-parser.cup | 62 ++-
.../analysis/AlterTableDropPartitionStmt.java | 22 +-
.../analysis/AlterTableSetCachedStmt.java | 30 +-
.../analysis/AlterTableSetFileFormatStmt.java | 12 +-
.../analysis/AlterTableSetLocationStmt.java | 50 ++-
.../impala/analysis/AlterTableSetStmt.java | 18 +-
.../analysis/AlterTableSetTblProperties.java | 8 +-
.../apache/impala/analysis/AlterTableStmt.java | 24 +-
.../org/apache/impala/analysis/Analyzer.java | 24 +-
.../apache/impala/analysis/BaseTableRef.java | 2 +-
.../impala/analysis/CollectionTableRef.java | 2 +-
.../impala/analysis/ComputeStatsStmt.java | 105 +++--
.../apache/impala/analysis/DropStatsStmt.java | 55 ++-
.../apache/impala/analysis/InlineViewRef.java | 5 +-
.../apache/impala/analysis/PartitionSet.java | 199 +++++++++
.../apache/impala/analysis/PartitionSpec.java | 62 +--
.../impala/analysis/PartitionSpecBase.java | 94 +++++
.../apache/impala/analysis/ShowFilesStmt.java | 44 +-
.../org/apache/impala/analysis/TableRef.java | 12 +-
.../apache/impala/analysis/TupleDescriptor.java | 14 +
.../impala/catalog/CatalogServiceCatalog.java | 19 +
.../org/apache/impala/catalog/HdfsTable.java | 80 +++-
.../java/org/apache/impala/catalog/Table.java | 4 +
.../impala/planner/HdfsPartitionPruner.java | 19 +-
.../impala/planner/SingleNodePlanner.java | 2 +-
.../impala/service/CatalogOpExecutor.java | 402 +++++++++++--------
.../org/apache/impala/service/Frontend.java | 2 +-
.../apache/impala/analysis/AnalyzeDDLTest.java | 334 +++++++++------
.../impala/analysis/AnalyzeStmtsTest.java | 2 +-
.../org/apache/impala/analysis/ParserTest.java | 17 +-
shell/impala_client.py | 2 +-
.../queries/QueryTest/alter-table.test | 45 +++
.../queries/QueryTest/compute-stats.test | 2 +
.../queries/QueryTest/hdfs-caching.test | 2 +-
.../queries/QueryTest/kudu_alter.test | 6 +
.../QueryTest/partition-ddl-predicates.test | 156 +++++++
tests/metadata/test_ddl.py | 4 +
42 files changed, 1398 insertions(+), 596 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/628685ae/be/src/service/query-exec-state.cc
----------------------------------------------------------------------
diff --git a/be/src/service/query-exec-state.cc b/be/src/service/query-exec-state.cc
index bfb5a86..6075914 100644
--- a/be/src/service/query-exec-state.cc
+++ b/be/src/service/query-exec-state.cc
@@ -516,6 +516,9 @@ Status ImpalaServer::QueryExecState::ExecDdlRequest() {
DCHECK(exec_request_.__isset.query_exec_request);
RETURN_IF_ERROR(ExecQueryOrDmlRequest(exec_request_.query_exec_request));
}
+
+ // Set the results to be reported to the client.
+ SetResultSet(catalog_op_executor_->ddl_exec_response());
return Status::OK();
}
@@ -919,6 +922,13 @@ Status ImpalaServer::QueryExecState::UpdateCatalog() {
return Status::OK();
}
+void ImpalaServer::QueryExecState::SetResultSet(const TDdlExecResponse* ddl_resp) {
+ if (ddl_resp != NULL && ddl_resp->__isset.result_set) {
+ result_metadata_ = ddl_resp->result_set.schema;
+ request_result_set_.reset(new vector<TResultRow>(ddl_resp->result_set.rows));
+ }
+}
+
void ImpalaServer::QueryExecState::SetResultSet(const vector<string>& results) {
request_result_set_.reset(new vector<TResultRow>);
request_result_set_->resize(results.size());
@@ -1026,14 +1036,7 @@ Status ImpalaServer::QueryExecState::UpdateTableAndColumnStats(
exec_request_.query_options.sync_ddl));
// Set the results to be reported to the client.
- const TDdlExecResponse* ddl_resp = catalog_op_executor_->ddl_exec_response();
- if (ddl_resp != NULL && ddl_resp->__isset.result_set) {
- result_metadata_ = ddl_resp->result_set.schema;
- request_result_set_.reset(new vector<TResultRow>);
- request_result_set_->assign(
- ddl_resp->result_set.rows.begin(), ddl_resp->result_set.rows.end());
- }
-
+ SetResultSet(catalog_op_executor_->ddl_exec_response());
query_events_->MarkEvent("Metastore update finished");
return Status::OK();
}
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/628685ae/be/src/service/query-exec-state.h
----------------------------------------------------------------------
diff --git a/be/src/service/query-exec-state.h b/be/src/service/query-exec-state.h
index 55e3ffa..28c8cba 100644
--- a/be/src/service/query-exec-state.h
+++ b/be/src/service/query-exec-state.h
@@ -374,6 +374,7 @@ class ImpalaServer::QueryExecState {
/// Copies results into request_result_set_
/// TODO: Have the FE return list<Data.TResultRow> so that this isn't necessary
+ void SetResultSet(const TDdlExecResponse* ddl_resp);
void SetResultSet(const std::vector<std::string>& results);
void SetResultSet(const std::vector<std::string>& col1,
const std::vector<std::string>& col2);
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/628685ae/common/thrift/CatalogService.thrift
----------------------------------------------------------------------
diff --git a/common/thrift/CatalogService.thrift b/common/thrift/CatalogService.thrift
index ec4bde2..188f71a 100644
--- a/common/thrift/CatalogService.thrift
+++ b/common/thrift/CatalogService.thrift
@@ -145,7 +145,7 @@ struct TDdlExecResponse {
2: optional bool new_table_created;
// Result of DDL operation to be returned to the client. Currently only set
- // by COMPUTE STATS.
+ // by COMPUTE STATS and ALTER TABLE.
3: optional Results.TResultSet result_set
}
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/628685ae/common/thrift/Frontend.thrift
----------------------------------------------------------------------
diff --git a/common/thrift/Frontend.thrift b/common/thrift/Frontend.thrift
index 8c279f5..6fdf972 100644
--- a/common/thrift/Frontend.thrift
+++ b/common/thrift/Frontend.thrift
@@ -209,9 +209,9 @@ struct TShowTablesParams {
struct TShowFilesParams {
1: required CatalogObjects.TTableName table_name
- // An optional partition spec. Set if this operation should apply to a specific
- // partition rather than the base table.
- 2: optional list<CatalogObjects.TPartitionKeyValue> partition_spec
+ // An optional partition set. Set if this operation should apply to a list of
+ // partitions rather than the base table.
+ 2: optional list<list<CatalogObjects.TPartitionKeyValue>> partition_set
}
// Parameters for SHOW [CURRENT] ROLES and SHOW ROLE GRANT GROUP <groupName> commands
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/628685ae/common/thrift/JniCatalog.thrift
----------------------------------------------------------------------
diff --git a/common/thrift/JniCatalog.thrift b/common/thrift/JniCatalog.thrift
index 8ed9fe3..7fa7f7b 100644
--- a/common/thrift/JniCatalog.thrift
+++ b/common/thrift/JniCatalog.thrift
@@ -111,10 +111,10 @@ struct TDropStatsParams {
// Fully qualified name of the target table
1: required CatalogObjects.TTableName table_name
- // If set, delete the stats only for a particular partition, but do not recompute the
+ // If set, delete the stats only for specified partitions, but do not recompute the
// stats for the whole table. This is set only for
// DROP INCREMENTAL STATS <table> PARTITION(...)
- 2: optional list<CatalogObjects.TPartitionKeyValue> partition_spec
+ 2: optional list<list<CatalogObjects.TPartitionKeyValue>> partition_set
}
// Parameters of CREATE FUNCTION commands
@@ -194,8 +194,8 @@ struct TAlterTableDropColParams {
// Parameters for ALTER TABLE DROP PARTITION commands
struct TAlterTableDropPartitionParams {
- // The partition spec (list of keys and values) to add.
- 1: required list<CatalogObjects.TPartitionKeyValue> partition_spec
+ // The partition set used to drop partitions.
+ 1: required list<list<CatalogObjects.TPartitionKeyValue>> partition_set
// If true, no error is raised if no partition with the specified spec exists.
2: required bool if_exists
@@ -222,18 +222,18 @@ struct TAlterTableSetTblPropertiesParams {
// Map of property names to property values.
2: required map<string, string> properties
- // If set, alters the properties of the given partition, otherwise
+ // If set, alters the properties of the given partitions, otherwise
// those of the table.
- 3: optional list<CatalogObjects.TPartitionKeyValue> partition_spec
+ 3: optional list<list<CatalogObjects.TPartitionKeyValue>> partition_set
}
-// Parameters for ALTER TABLE SET [PARTITION partitionSpec] FILEFORMAT commands.
+// Parameters for ALTER TABLE SET [PARTITION partitionSet] FILEFORMAT commands.
struct TAlterTableSetFileFormatParams {
// New file format.
1: required CatalogObjects.THdfsFileFormat file_format
- // An optional partition spec, set if modifying the fileformat of a partition.
- 2: optional list<CatalogObjects.TPartitionKeyValue> partition_spec
+ // An optional partition set, set if modifying the fileformat of the partitions.
+ 2: optional list<list<CatalogObjects.TPartitionKeyValue>> partition_set
}
// Parameters for ALTER TABLE SET [PARTITION partitionSpec] location commands.
@@ -270,14 +270,14 @@ struct TAlterTableUpdateStatsParams {
6: optional bool is_incremental
}
-// Parameters for ALTER TABLE SET [PARTITION partitionSpec] CACHED|UNCACHED
+// Parameters for ALTER TABLE SET [PARTITION partitionSet] CACHED|UNCACHED
struct TAlterTableSetCachedParams {
// Details on what operation to perform (cache or uncache)
1: required THdfsCachingOp cache_op
- // An optional partition spec, set if marking a partition as cached/uncached
+ // An optional partition set, set if marking the partitions as cached/uncached
// rather than a table.
- 2: optional list<CatalogObjects.TPartitionKeyValue> partition_spec
+ 2: optional list<list<CatalogObjects.TPartitionKeyValue>> partition_set
}
// Parameters for all ALTER TABLE commands.
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/628685ae/fe/src/main/cup/sql-parser.cup
----------------------------------------------------------------------
diff --git a/fe/src/main/cup/sql-parser.cup b/fe/src/main/cup/sql-parser.cup
index 57d5a24..1884036 100644
--- a/fe/src/main/cup/sql-parser.cup
+++ b/fe/src/main/cup/sql-parser.cup
@@ -370,6 +370,10 @@ nonterminal StatementBase explain_stmt;
nonterminal PartitionSpec opt_partition_spec;
// Required partition spec
nonterminal PartitionSpec partition_spec;
+// Optional partition set
+nonterminal PartitionSet opt_partition_set;
+// Required partition set
+nonterminal PartitionSet partition_set;
nonterminal ArrayList<PartitionKeyValue> partition_clause;
nonterminal ArrayList<PartitionKeyValue> static_partition_key_value_list;
nonterminal ArrayList<PartitionKeyValue> partition_key_value_list;
@@ -904,34 +908,39 @@ alter_tbl_stmt ::=
column_def:col_def
{: RESULT = new AlterTableChangeColStmt(table, col_name, col_def); :}
| KW_ALTER KW_TABLE table_name:table KW_DROP if_exists_val:if_exists
- partition_spec:partition purge_val:purge
- {: RESULT = new AlterTableDropPartitionStmt(table, partition, if_exists, purge); :}
- | KW_ALTER KW_TABLE table_name:table opt_partition_spec:partition KW_SET KW_FILEFORMAT
+ partition_set:partitions purge_val:purge
+ {: RESULT = new AlterTableDropPartitionStmt(table, partitions, if_exists, purge); :}
+ | KW_ALTER KW_TABLE table_name:table opt_partition_set:partitions KW_SET KW_FILEFORMAT
file_format_val:file_format
- {: RESULT = new AlterTableSetFileFormatStmt(table, partition, file_format); :}
- | KW_ALTER KW_TABLE table_name:table opt_partition_spec:partition KW_SET
+ {: RESULT = new AlterTableSetFileFormatStmt(table, partitions, file_format); :}
+ | KW_ALTER KW_TABLE table_name:table opt_partition_set:partitions KW_SET
KW_LOCATION STRING_LITERAL:location
- {: RESULT = new AlterTableSetLocationStmt(table, partition, new HdfsUri(location)); :}
+ {:
+ // Need to check in analysis that the partition set only matches a single partition.
+ // Avoids a reduce/reduce conflict and allows the user to select a partition without
+ // fully specifying all partition-key values.
+ RESULT = new AlterTableSetLocationStmt(table, partitions, new HdfsUri(location));
+ :}
| KW_ALTER KW_TABLE table_name:table KW_RENAME KW_TO table_name:new_table
{: RESULT = new AlterTableOrViewRenameStmt(table, new_table, true); :}
- | KW_ALTER KW_TABLE table_name:table opt_partition_spec:partition KW_SET
+ | KW_ALTER KW_TABLE table_name:table opt_partition_set:partitions KW_SET
table_property_type:target LPAREN properties_map:properties RPAREN
- {: RESULT = new AlterTableSetTblProperties(table, partition, target, properties); :}
- | KW_ALTER KW_TABLE table_name:table opt_partition_spec:partition KW_SET
+ {: RESULT = new AlterTableSetTblProperties(table, partitions, target, properties); :}
+ | KW_ALTER KW_TABLE table_name:table opt_partition_set:partition KW_SET
KW_COLUMN KW_STATS IDENT:col LPAREN properties_map:map RPAREN
{:
- // The opt_partition_spec is used to avoid conflicts even though
+ // The opt_partition_set is used to avoid conflicts even though
// a partition clause does not make sense for this stmt. If a partition
// is given, manually throw a parse error.
if (partition != null) parser.parseError("set", SqlParserSymbols.KW_SET);
RESULT = new AlterTableSetColumnStats(table, col, map);
:}
- | KW_ALTER KW_TABLE table_name:table opt_partition_spec:partition KW_SET
+ | KW_ALTER KW_TABLE table_name:table opt_partition_set:partitions KW_SET
cache_op_val:cache_op
{:
// Ensure a parser error is thrown for ALTER statements if no cache op is specified.
if (cache_op == null) parser.parseError("set", SqlParserSymbols.KW_SET);
- RESULT = new AlterTableSetCachedStmt(table, partition, cache_op);
+ RESULT = new AlterTableSetCachedStmt(table, partitions, cache_op);
:}
| KW_ALTER KW_TABLE table_name:table KW_RECOVER KW_PARTITIONS
{: RESULT = new AlterTableRecoverPartitionsStmt(table); :}
@@ -1550,15 +1559,15 @@ compute_stats_stmt ::=
{: RESULT = new ComputeStatsStmt(table); :}
| KW_COMPUTE KW_INCREMENTAL KW_STATS table_name:table
{: RESULT = new ComputeStatsStmt(table, true, null); :}
- | KW_COMPUTE KW_INCREMENTAL KW_STATS table_name:table partition_spec:spec
- {: RESULT = new ComputeStatsStmt(table, true, spec); :}
+ | KW_COMPUTE KW_INCREMENTAL KW_STATS table_name:table partition_set:partitions
+ {: RESULT = new ComputeStatsStmt(table, true, partitions); :}
;
drop_stats_stmt ::=
KW_DROP KW_STATS table_name:table
{: RESULT = new DropStatsStmt(table); :}
- | KW_DROP KW_INCREMENTAL KW_STATS table_name:table partition_spec:spec
- {: RESULT = new DropStatsStmt(table, spec); :}
+ | KW_DROP KW_INCREMENTAL KW_STATS table_name:table partition_set:partitions
+ {: RESULT = new DropStatsStmt(table, partitions); :}
;
drop_db_stmt ::=
@@ -1626,6 +1635,23 @@ partition_key_value_list ::=
:}
;
+// TODO: reuse this for INSERT statement.
+// A partition set is a set of expressions used to select a list of partitions
+// for certain operation such as DROP PARTITION. This is different than a partition
+// clause in an INSERT statement. Partition clause contains dynamic and static
+// partition key/values.
+partition_set ::=
+ KW_PARTITION LPAREN expr_list:list RPAREN
+ {: RESULT = new PartitionSet(list); :}
+ ;
+
+opt_partition_set ::=
+ partition_set:partition_set
+ {: RESULT = partition_set; :}
+ | /* Empty */
+ {: RESULT = null; :}
+ ;
+
// A partition spec is a set of static partition key/value pairs. This is a bit
// different than a partition clause in an INSERT statement because that allows
// for dynamic and static partition key/values.
@@ -2020,8 +2046,8 @@ show_create_function_stmt ::=
;
show_files_stmt ::=
- KW_SHOW KW_FILES KW_IN table_name:table opt_partition_spec:partition
- {: RESULT = new ShowFilesStmt(table, partition); :}
+ KW_SHOW KW_FILES KW_IN table_name:table opt_partition_set:partitions
+ {: RESULT = new ShowFilesStmt(table, partitions); :}
;
describe_db_stmt ::=
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/628685ae/fe/src/main/java/org/apache/impala/analysis/AlterTableDropPartitionStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/AlterTableDropPartitionStmt.java b/fe/src/main/java/org/apache/impala/analysis/AlterTableDropPartitionStmt.java
index 773fea4..9eb580d 100644
--- a/fe/src/main/java/org/apache/impala/analysis/AlterTableDropPartitionStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/AlterTableDropPartitionStmt.java
@@ -29,18 +29,18 @@ import com.google.common.base.Preconditions;
*/
public class AlterTableDropPartitionStmt extends AlterTableStmt {
private final boolean ifExists_;
- private final PartitionSpec partitionSpec_;
+ private final PartitionSet partitionSet_;
// Setting this value causes dropped partition(s) to be permanently
// deleted. For example, for HDFS tables it skips the trash mechanism
private final boolean purgePartition_;
public AlterTableDropPartitionStmt(TableName tableName,
- PartitionSpec partitionSpec, boolean ifExists, boolean purgePartition) {
+ PartitionSet partitionSet, boolean ifExists, boolean purgePartition) {
super(tableName);
- Preconditions.checkNotNull(partitionSpec);
- partitionSpec_ = partitionSpec;
- partitionSpec_.setTableName(tableName);
+ Preconditions.checkNotNull(partitionSet);
+ partitionSet_ = partitionSet;
+ partitionSet_.setTableName(tableName);
ifExists_ = ifExists;
purgePartition_ = purgePartition;
}
@@ -52,7 +52,7 @@ public class AlterTableDropPartitionStmt extends AlterTableStmt {
StringBuilder sb = new StringBuilder("ALTER TABLE " + getTbl());
sb.append(" DROP ");
if (ifExists_) sb.append("IF EXISTS ");
- sb.append(" DROP " + partitionSpec_.toSql());
+ sb.append(" DROP " + partitionSet_.toSql());
if (purgePartition_) sb.append(" PURGE");
return sb.toString();
}
@@ -62,8 +62,8 @@ public class AlterTableDropPartitionStmt extends AlterTableStmt {
TAlterTableParams params = super.toThrift();
params.setAlter_type(TAlterTableType.DROP_PARTITION);
TAlterTableDropPartitionParams addPartParams = new TAlterTableDropPartitionParams();
- addPartParams.setPartition_spec(partitionSpec_.toThrift());
- addPartParams.setIf_exists(ifExists_);
+ addPartParams.setPartition_set(partitionSet_.toThrift());
+ addPartParams.setIf_exists(!partitionSet_.getPartitionShouldExist());
addPartParams.setPurge(purgePartition_);
params.setDrop_partition_params(addPartParams);
return params;
@@ -72,8 +72,8 @@ public class AlterTableDropPartitionStmt extends AlterTableStmt {
@Override
public void analyze(Analyzer analyzer) throws AnalysisException {
super.analyze(analyzer);
- if (!ifExists_) partitionSpec_.setPartitionShouldExist();
- partitionSpec_.setPrivilegeRequirement(Privilege.ALTER);
- partitionSpec_.analyze(analyzer);
+ if (!ifExists_) partitionSet_.setPartitionShouldExist();
+ partitionSet_.setPrivilegeRequirement(Privilege.ALTER);
+ partitionSet_.analyze(analyzer);
}
}
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/628685ae/fe/src/main/java/org/apache/impala/analysis/AlterTableSetCachedStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/AlterTableSetCachedStmt.java b/fe/src/main/java/org/apache/impala/analysis/AlterTableSetCachedStmt.java
index 56122e1..931c6a9 100644
--- a/fe/src/main/java/org/apache/impala/analysis/AlterTableSetCachedStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/AlterTableSetCachedStmt.java
@@ -17,6 +17,8 @@
package org.apache.impala.analysis;
+import java.util.List;
+
import org.apache.impala.catalog.HdfsTable;
import org.apache.impala.catalog.HdfsPartition;
import org.apache.impala.catalog.Table;
@@ -27,14 +29,14 @@ import org.apache.impala.thrift.TAlterTableType;
import com.google.common.base.Preconditions;
/**
- * Represents an ALTER TABLE [PARTITION partitionSpec] SET [UNCACHED|CACHED 'pool'].
+ * Represents an ALTER TABLE [PARTITION partitionSet] SET [UNCACHED|CACHED 'pool'].
*/
public class AlterTableSetCachedStmt extends AlterTableSetStmt {
private final HdfsCachingOp cacheOp_;
public AlterTableSetCachedStmt(TableName tableName,
- PartitionSpec partitionSpec, HdfsCachingOp cacheOp) {
- super(tableName, partitionSpec);
+ PartitionSet partitionSet, HdfsCachingOp cacheOp) {
+ super(tableName, partitionSet);
Preconditions.checkNotNull(cacheOp);
cacheOp_ = cacheOp;
}
@@ -45,8 +47,8 @@ public class AlterTableSetCachedStmt extends AlterTableSetStmt {
params.setAlter_type(TAlterTableType.SET_CACHED);
TAlterTableSetCachedParams cachingParams =
new TAlterTableSetCachedParams();
- if (getPartitionSpec() != null) {
- cachingParams.setPartition_spec(getPartitionSpec().toThrift());
+ if (getPartitionSet() != null) {
+ cachingParams.setPartition_set(getPartitionSet().toThrift());
}
cachingParams.setCache_op(cacheOp_.toThrift());
params.setSet_cached_params(cachingParams);
@@ -66,18 +68,18 @@ public class AlterTableSetCachedStmt extends AlterTableSetStmt {
}
if (cacheOp_.shouldCache()) {
- boolean isCacheable;
- PartitionSpec partSpec = getPartitionSpec();
+ boolean isCacheable = true;
+ PartitionSet partitionSet = getPartitionSet();
HdfsTable hdfsTable = (HdfsTable)table;
StringBuilder nameSb = new StringBuilder();
- if (partSpec != null) {
- HdfsPartition part = hdfsTable.getPartition(partSpec.getPartitionSpecKeyValues());
- if (part == null) {
- throw new AnalysisException("Partition spec does not exist: " +
- partSpec.toSql());
+ if (partitionSet != null) {
+ List<HdfsPartition> parts = partitionSet.getPartitions();
+ nameSb.append("Partition(s) (");
+ for(HdfsPartition part: parts) {
+ isCacheable = isCacheable && part.isCacheable();
+ if(!part.isCacheable()) nameSb.append(part.getPartitionName());
}
- isCacheable = part.isCacheable();
- nameSb.append("Partition (" + part.getPartitionName() + ")");
+ nameSb.append(")");
} else {
isCacheable = hdfsTable.isCacheable();
nameSb.append("Table ").append(table.getFullName());
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/628685ae/fe/src/main/java/org/apache/impala/analysis/AlterTableSetFileFormatStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/AlterTableSetFileFormatStmt.java b/fe/src/main/java/org/apache/impala/analysis/AlterTableSetFileFormatStmt.java
index ff8d878..5b53b80 100644
--- a/fe/src/main/java/org/apache/impala/analysis/AlterTableSetFileFormatStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/AlterTableSetFileFormatStmt.java
@@ -24,14 +24,14 @@ import org.apache.impala.thrift.TAlterTableType;
import org.apache.impala.thrift.THdfsFileFormat;
/**
- * Represents an ALTER TABLE [PARTITION partitionSpec] SET FILEFORMAT statement.
+ * Represents an ALTER TABLE [PARTITION partitionSet] SET FILEFORMAT statement.
*/
public class AlterTableSetFileFormatStmt extends AlterTableSetStmt {
private final THdfsFileFormat fileFormat_;
public AlterTableSetFileFormatStmt(TableName tableName,
- PartitionSpec partitionSpec, THdfsFileFormat fileFormat) {
- super(tableName, partitionSpec);
+ PartitionSet partitionSet, THdfsFileFormat fileFormat) {
+ super(tableName, partitionSet);
this.fileFormat_ = fileFormat;
}
@@ -43,8 +43,8 @@ public class AlterTableSetFileFormatStmt extends AlterTableSetStmt {
params.setAlter_type(TAlterTableType.SET_FILE_FORMAT);
TAlterTableSetFileFormatParams fileFormatParams =
new TAlterTableSetFileFormatParams(fileFormat_);
- if (getPartitionSpec() != null) {
- fileFormatParams.setPartition_spec(getPartitionSpec().toThrift());
+ if (getPartitionSet() != null) {
+ fileFormatParams.setPartition_set(getPartitionSet().toThrift());
}
params.setSet_file_format_params(fileFormatParams);
return params;
@@ -54,4 +54,4 @@ public class AlterTableSetFileFormatStmt extends AlterTableSetStmt {
public void analyze(Analyzer analyzer) throws AnalysisException {
super.analyze(analyzer);
}
-}
\ No newline at end of file
+}
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/628685ae/fe/src/main/java/org/apache/impala/analysis/AlterTableSetLocationStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/AlterTableSetLocationStmt.java b/fe/src/main/java/org/apache/impala/analysis/AlterTableSetLocationStmt.java
index 0219b8c..d7a7448 100644
--- a/fe/src/main/java/org/apache/impala/analysis/AlterTableSetLocationStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/AlterTableSetLocationStmt.java
@@ -17,6 +17,10 @@
package org.apache.impala.analysis;
+import java.util.Collections;
+import java.util.List;
+
+import org.apache.hadoop.fs.permission.FsAction;
import org.apache.impala.authorization.Privilege;
import org.apache.impala.catalog.HdfsPartition;
import org.apache.impala.catalog.HdfsTable;
@@ -25,18 +29,23 @@ import org.apache.impala.common.AnalysisException;
import org.apache.impala.thrift.TAlterTableParams;
import org.apache.impala.thrift.TAlterTableSetLocationParams;
import org.apache.impala.thrift.TAlterTableType;
+import org.apache.impala.thrift.TPartitionKeyValue;
+import com.google.common.base.Function;
+import com.google.common.base.Joiner;
import com.google.common.base.Preconditions;
-import org.apache.hadoop.fs.permission.FsAction;
+import com.google.common.collect.Lists;
/**
* Represents an ALTER TABLE [PARTITION partitionSpec] SET LOCATION statement.
*/
public class AlterTableSetLocationStmt extends AlterTableSetStmt {
+ // max num of partitions printed during error reporting.
+ private static final int NUM_PARTITION_LOG_LIMIT = 3;
private final HdfsUri location_;
public AlterTableSetLocationStmt(TableName tableName,
- PartitionSpec partitionSpec, HdfsUri location) {
- super(tableName, partitionSpec);
+ PartitionSet partitionSet, HdfsUri location) {
+ super(tableName, partitionSet);
Preconditions.checkNotNull(location);
this.location_ = location;
}
@@ -49,8 +58,10 @@ public class AlterTableSetLocationStmt extends AlterTableSetStmt {
params.setAlter_type(TAlterTableType.SET_LOCATION);
TAlterTableSetLocationParams locationParams =
new TAlterTableSetLocationParams(location_.toString());
- if (getPartitionSpec() != null) {
- locationParams.setPartition_spec(getPartitionSpec().toThrift());
+ if (getPartitionSet() != null) {
+ List<List<TPartitionKeyValue>> tPartitionSet = getPartitionSet().toThrift();
+ Preconditions.checkState(tPartitionSet.size() == 1);
+ locationParams.setPartition_spec(tPartitionSet.get(0));
}
params.setSet_location_params(locationParams);
return params;
@@ -65,16 +76,31 @@ public class AlterTableSetLocationStmt extends AlterTableSetStmt {
Preconditions.checkNotNull(table);
if (table instanceof HdfsTable) {
HdfsTable hdfsTable = (HdfsTable) table;
- if (getPartitionSpec() != null) {
+ if (getPartitionSet() != null) {
// Targeting a partition rather than a table.
- PartitionSpec partitionSpec = getPartitionSpec();
- HdfsPartition partition = hdfsTable.getPartition(
- partitionSpec.getPartitionSpecKeyValues());
- Preconditions.checkNotNull(partition);
- if (partition.isMarkedCached()) {
+ List<HdfsPartition> partitions = getPartitionSet().getPartitions();
+ if (partitions.size() != 1) {
+ // Sort the partitions to get a consistent error reporting.
+ List<HdfsPartition> sortedPartitions = Lists.newArrayList(partitions);
+ Collections.sort(sortedPartitions);
+ List<String> sortedPartitionNames =
+ Lists.transform(sortedPartitions.subList(0, NUM_PARTITION_LOG_LIMIT),
+ new Function<HdfsPartition, String>() {
+ @Override
+ public String apply(HdfsPartition hdfsPartition) {
+ return hdfsPartition.getPartitionName();
+ }
+ });
+ throw new AnalysisException(String.format(
+ "Partition expr in set location statements can only match " +
+ "one partition. Too many matched partitions %s %s",
+ Joiner.on(",").join(sortedPartitionNames),
+ sortedPartitions.size() < partitions.size() ? "..." : "."));
+ }
+ if (partitions.get(0).isMarkedCached()) {
throw new AnalysisException(String.format("Target partition is
cached, " +
"please uncache before changing the location using: ALTER TABLE
%s %s " +
- "SET UNCACHED", table.getFullName(), partitionSpec.toSql()));
+ "SET UNCACHED", table.getFullName(), getPartitionSet().toSql()));
}
} else if (hdfsTable.isMarkedCached()) {
throw new AnalysisException(String.format("Target table is cached,
please " +
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/628685ae/fe/src/main/java/org/apache/impala/analysis/AlterTableSetStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/AlterTableSetStmt.java b/fe/src/main/java/org/apache/impala/analysis/AlterTableSetStmt.java
index a37b949..178d28b 100644
--- a/fe/src/main/java/org/apache/impala/analysis/AlterTableSetStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/AlterTableSetStmt.java
@@ -26,15 +26,15 @@ import org.apache.impala.common.AnalysisException;
* Base class for all ALTER TABLE ... SET statements
*/
public class AlterTableSetStmt extends AlterTableStmt {
- protected final PartitionSpec partitionSpec_;
+ protected final PartitionSet partitionSet_;
- public AlterTableSetStmt(TableName tableName, PartitionSpec partitionSpec) {
+ public AlterTableSetStmt(TableName tableName, PartitionSet partitionSet) {
super(tableName);
- partitionSpec_ = partitionSpec;
- if (partitionSpec_ != null) partitionSpec_.setTableName(tableName);
+ partitionSet_ = partitionSet;
+ if (partitionSet_ != null) partitionSet_.setTableName(tableName);
}
- public PartitionSpec getPartitionSpec() { return partitionSpec_; }
+ PartitionSet getPartitionSet() { return partitionSet_; }
@Override
public void analyze(Analyzer analyzer) throws AnalysisException {
@@ -49,10 +49,10 @@ public class AlterTableSetStmt extends AlterTableStmt {
}
// Altering the table rather than the partition.
- if (partitionSpec_ == null) return;
+ if (partitionSet_ == null) return;
- partitionSpec_.setPartitionShouldExist();
- partitionSpec_.setPrivilegeRequirement(Privilege.ALTER);
- partitionSpec_.analyze(analyzer);
+ partitionSet_.setPartitionShouldExist();
+ partitionSet_.setPrivilegeRequirement(Privilege.ALTER);
+ partitionSet_.analyze(analyzer);
}
}
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/628685ae/fe/src/main/java/org/apache/impala/analysis/AlterTableSetTblProperties.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/AlterTableSetTblProperties.java b/fe/src/main/java/org/apache/impala/analysis/AlterTableSetTblProperties.java
index f2a81cd..08007b3 100644
--- a/fe/src/main/java/org/apache/impala/analysis/AlterTableSetTblProperties.java
+++ b/fe/src/main/java/org/apache/impala/analysis/AlterTableSetTblProperties.java
@@ -47,9 +47,9 @@ public class AlterTableSetTblProperties extends AlterTableSetStmt {
private final TTablePropertyType targetProperty_;
private final HashMap<String, String> tblProperties_;
- public AlterTableSetTblProperties(TableName tableName, PartitionSpec partitionSpec,
+ public AlterTableSetTblProperties(TableName tableName, PartitionSet partitionSet,
TTablePropertyType targetProperty, HashMap<String, String> tblProperties) {
- super(tableName, partitionSpec);
+ super(tableName, partitionSet);
Preconditions.checkNotNull(tblProperties);
Preconditions.checkNotNull(targetProperty);
targetProperty_ = targetProperty;
@@ -67,8 +67,8 @@ public class AlterTableSetTblProperties extends AlterTableSetStmt {
new TAlterTableSetTblPropertiesParams();
tblPropertyParams.setTarget(targetProperty_);
tblPropertyParams.setProperties(tblProperties_);
- if (partitionSpec_ != null) {
- tblPropertyParams.setPartition_spec(partitionSpec_.toThrift());
+ if (partitionSet_ != null) {
+ tblPropertyParams.setPartition_set(partitionSet_.toThrift());
}
params.setSet_tbl_properties_params(tblPropertyParams);
return params;
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/628685ae/fe/src/main/java/org/apache/impala/analysis/AlterTableStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/AlterTableStmt.java b/fe/src/main/java/org/apache/impala/analysis/AlterTableStmt.java
index 223abe9..d86448b 100644
--- a/fe/src/main/java/org/apache/impala/analysis/AlterTableStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/AlterTableStmt.java
@@ -69,23 +69,33 @@ public abstract class AlterTableStmt extends StatementBase {
@Override
public void analyze(Analyzer analyzer) throws AnalysisException {
- table_ = analyzer.getTable(tableName_, Privilege.ALTER);
+ // Resolve and analyze this table ref so we can evaluate partition predicates.
+ TableRef tableRef = new TableRef(tableName_.toPath(), null, Privilege.ALTER);
+ tableRef = analyzer.resolveTableRef(tableRef);
+ Preconditions.checkNotNull(tableRef);
+ tableRef.analyze(analyzer);
+ if (tableRef instanceof InlineViewRef) {
+ throw new AnalysisException(String.format(
+ "ALTER TABLE not allowed on a view: %s", tableName_));
+ }
+ if (tableRef instanceof CollectionTableRef) {
+ throw new AnalysisException(String.format(
+ "ALTER TABLE not allowed on a nested collection: %s", tableName_));
+ }
+ Preconditions.checkState(tableRef instanceof BaseTableRef);
+ table_ = tableRef.getTable();
if (table_ instanceof KuduTable
&& !(this instanceof AlterTableSetTblProperties)
&& !(this instanceof AlterTableSetColumnStats)
&& !(this instanceof AlterTableOrViewRenameStmt)) {
throw new AnalysisException(String.format(
- "ALTER TABLE not allowed on Kudu table: %s", table_.getFullName()));
- }
- if (table_ instanceof View) {
- throw new AnalysisException(String.format(
- "ALTER TABLE not allowed on a view: %s", table_.getFullName()));
+ "ALTER TABLE not allowed on Kudu table: %s", tableName_));
}
if (table_ instanceof DataSourceTable
&& !(this instanceof AlterTableSetColumnStats)) {
throw new AnalysisException(String.format(
"ALTER TABLE not allowed on a table produced by a data source: %s",
- table_.getFullName()));
+ tableName_));
}
}
}
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/628685ae/fe/src/main/java/org/apache/impala/analysis/Analyzer.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/Analyzer.java b/fe/src/main/java/org/apache/impala/analysis/Analyzer.java
index 5a34fbc..0638bed 100644
--- a/fe/src/main/java/org/apache/impala/analysis/Analyzer.java
+++ b/fe/src/main/java/org/apache/impala/analysis/Analyzer.java
@@ -553,11 +553,11 @@ public class Analyzer {
if (rawPath.size() > 1) {
registerPrivReq(new PrivilegeRequestBuilder()
.onTable(rawPath.get(0), rawPath.get(1))
- .allOf(Privilege.SELECT).toRequest());
+ .allOf(tableRef.getPrivilege()).toRequest());
}
registerPrivReq(new PrivilegeRequestBuilder()
.onTable(getDefaultDb(), rawPath.get(0))
- .allOf(Privilege.SELECT).toRequest());
+ .allOf(tableRef.getPrivilege()).toRequest());
}
throw e;
} catch (TableLoadingException e) {
@@ -2568,28 +2568,28 @@ public class Analyzer {
}
/**
- * Registers a table-level SELECT privilege request and an access event for auditing
- * for the given table. The given table must be a base table or a catalog view (not
- * a local view).
+ * Registers a table-level privilege request and an access event for auditing
+ * for the given table and privilege. The table must be a base table or a
+ * catalog view (not a local view).
*/
- public void registerAuthAndAuditEvent(Table table, Analyzer analyzer) {
+ public void registerAuthAndAuditEvent(Table table, Privilege priv) {
// Add access event for auditing.
if (table instanceof View) {
View view = (View) table;
Preconditions.checkState(!view.isLocalView());
- analyzer.addAccessEvent(new TAccessEvent(
+ addAccessEvent(new TAccessEvent(
table.getFullName(), TCatalogObjectType.VIEW,
- Privilege.SELECT.toString()));
+ priv.toString()));
} else {
- analyzer.addAccessEvent(new TAccessEvent(
+ addAccessEvent(new TAccessEvent(
table.getFullName(), TCatalogObjectType.TABLE,
- Privilege.SELECT.toString()));
+ priv.toString()));
}
// Add privilege request.
TableName tableName = table.getTableName();
- analyzer.registerPrivReq(new PrivilegeRequestBuilder()
+ registerPrivReq(new PrivilegeRequestBuilder()
.onTable(tableName.getDb(), tableName.getTbl())
- .allOf(Privilege.SELECT).toRequest());
+ .allOf(priv).toRequest());
}
/**
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/628685ae/fe/src/main/java/org/apache/impala/analysis/BaseTableRef.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/BaseTableRef.java b/fe/src/main/java/org/apache/impala/analysis/BaseTableRef.java
index fd1c455..1691315 100644
--- a/fe/src/main/java/org/apache/impala/analysis/BaseTableRef.java
+++ b/fe/src/main/java/org/apache/impala/analysis/BaseTableRef.java
@@ -58,7 +58,7 @@ public class BaseTableRef extends TableRef {
@Override
public void analyze(Analyzer analyzer) throws AnalysisException {
if (isAnalyzed_) return;
- analyzer.registerAuthAndAuditEvent(resolvedPath_.getRootTable(), analyzer);
+ analyzer.registerAuthAndAuditEvent(resolvedPath_.getRootTable(), priv_);
desc_ = analyzer.registerTableRef(this);
isAnalyzed_ = true;
analyzeHints(analyzer);
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/628685ae/fe/src/main/java/org/apache/impala/analysis/CollectionTableRef.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/CollectionTableRef.java b/fe/src/main/java/org/apache/impala/analysis/CollectionTableRef.java
index e8abaa8..05a3543 100644
--- a/fe/src/main/java/org/apache/impala/analysis/CollectionTableRef.java
+++ b/fe/src/main/java/org/apache/impala/analysis/CollectionTableRef.java
@@ -103,7 +103,7 @@ public class CollectionTableRef extends TableRef {
// Register a table-level privilege request as well as a column-level privilege request
// for the collection-typed column.
Preconditions.checkNotNull(resolvedPath_.getRootTable());
- analyzer.registerAuthAndAuditEvent(resolvedPath_.getRootTable(), analyzer);
+ analyzer.registerAuthAndAuditEvent(resolvedPath_.getRootTable(), priv_);
analyzer.registerPrivReq(new PrivilegeRequestBuilder().
allOf(Privilege.SELECT).onColumn(desc_.getTableName().getDb(),
desc_.getTableName().getTbl(), desc_.getPath().getRawPath().get(0))
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/628685ae/fe/src/main/java/org/apache/impala/analysis/ComputeStatsStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/ComputeStatsStmt.java b/fe/src/main/java/org/apache/impala/analysis/ComputeStatsStmt.java
index 5ad3cad..7aface4 100644
--- a/fe/src/main/java/org/apache/impala/analysis/ComputeStatsStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/ComputeStatsStmt.java
@@ -21,8 +21,6 @@ import java.util.Iterator;
import java.util.List;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
-import org.apache.log4j.Logger;
-
import org.apache.impala.authorization.Privilege;
import org.apache.impala.catalog.Column;
import org.apache.impala.catalog.HBaseTable;
@@ -30,13 +28,14 @@ import org.apache.impala.catalog.HdfsPartition;
import org.apache.impala.catalog.HdfsTable;
import org.apache.impala.catalog.Table;
import org.apache.impala.catalog.Type;
-import org.apache.impala.catalog.View;
import org.apache.impala.common.AnalysisException;
import org.apache.impala.common.PrintUtils;
import org.apache.impala.service.BackendConfig;
import org.apache.impala.thrift.TComputeStatsParams;
import org.apache.impala.thrift.TPartitionStats;
import org.apache.impala.thrift.TTableName;
+import org.apache.log4j.Logger;
+
import com.google.common.base.Joiner;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
@@ -99,9 +98,9 @@ public class ComputeStatsStmt extends StatementBase {
// PARTITION_ID() builtin)
private final List<List<String>> expectedPartitions_ = Lists.newArrayList();
- // If non-null, the partition that an incremental computation might apply to. Must be
+ // If non-null, partitions that an incremental computation might apply to. Must be
// null if this is a non-incremental computation.
- private PartitionSpec partitionSpec_ = null;
+ private PartitionSet partitionSet_ = null;
// The maximum number of partitions that may be explicitly selected by filter
// predicates. Any query that selects more than this automatically drops back to a full
@@ -120,20 +119,20 @@ public class ComputeStatsStmt extends StatementBase {
/**
* Constructor for the incremental form of COMPUTE STATS. If isIncremental is true,
* statistics will be recomputed incrementally; if false they will be recomputed for the
- * whole table. The partition spec partSpec can specify a single partition whose stats
- * should be recomputed.
+ * whole table. The partition set partitionSet can specify a list of partitions whose
+ * stats should be recomputed.
*/
protected ComputeStatsStmt(TableName tableName, boolean isIncremental,
- PartitionSpec partSpec) {
+ PartitionSet partitionSet) {
Preconditions.checkState(tableName != null && !tableName.isEmpty());
- Preconditions.checkState(isIncremental || partSpec == null);
+ Preconditions.checkState(isIncremental || partitionSet == null);
this.tableName_ = tableName;
this.table_ = null;
this.isIncremental_ = isIncremental;
- this.partitionSpec_ = partSpec;
- if (partitionSpec_ != null) {
- partitionSpec_.setTableName(tableName);
- partitionSpec_.setPrivilegeRequirement(Privilege.ALTER);
+ this.partitionSet_ = partitionSet;
+ if (partitionSet_ != null) {
+ partitionSet_.setTableName(tableName);
+ partitionSet_.setPrivilegeRequirement(Privilege.ALTER);
}
}
@@ -245,17 +244,25 @@ public class ComputeStatsStmt extends StatementBase {
*/
@Override
public void analyze(Analyzer analyzer) throws AnalysisException {
- table_ = analyzer.getTable(tableName_, Privilege.ALTER);
- String sqlTableName = table_.getTableName().toSql();
- if (table_ instanceof View) {
+ // Resolve and analyze this table ref so we can evaluate partition predicates.
+ TableRef tableRef = new TableRef(tableName_.toPath(), null, Privilege.ALTER);
+ tableRef = analyzer.resolveTableRef(tableRef);
+ Preconditions.checkNotNull(tableRef);
+ tableRef.analyze(analyzer);
+ if (tableRef instanceof InlineViewRef) {
+ throw new AnalysisException(String.format(
+ "COMPUTE STATS not supported for view: %s", tableName_));
+ }
+ if (tableRef instanceof CollectionTableRef) {
throw new AnalysisException(String.format(
- "COMPUTE STATS not supported for view %s", sqlTableName));
+ "COMPUTE STATS not supported for nested collection: %s",
tableName_));
}
+ table_ = analyzer.getTable(tableName_, Privilege.ALTER);
if (!(table_ instanceof HdfsTable)) {
- if (partitionSpec_ != null) {
+ if (partitionSet_ != null) {
throw new AnalysisException("COMPUTE INCREMENTAL ... PARTITION not
supported " +
- "for non-HDFS table " + table_.getTableName());
+ "for non-HDFS table " + tableName_);
}
isIncremental_ = false;
}
@@ -267,22 +274,14 @@ public class ComputeStatsStmt extends StatementBase {
if (table_ instanceof HdfsTable) {
hdfsTable = (HdfsTable)table_;
if (isIncremental_ && hdfsTable.getNumClusteringCols() == 0 &&
- partitionSpec_ != null) {
- throw new AnalysisException(String.format(
- "Can't compute PARTITION stats on an unpartitioned table: %s",
- sqlTableName));
- } else if (partitionSpec_ != null) {
- partitionSpec_.setPartitionShouldExist();
- partitionSpec_.analyze(analyzer);
- for (PartitionKeyValue kv: partitionSpec_.getPartitionSpecKeyValues()) {
- // TODO: We could match the dynamic keys (i.e. as wildcards) as well, but that
- // would involve looping over all partitions and seeing which match the
- // partition spec.
- if (!kv.isStatic()) {
- throw new AnalysisException("All partition keys must have
values: " +
- kv.toString());
- }
- }
+ partitionSet_ != null) {
+ throw new AnalysisException(String.format(
+ "Can't compute PARTITION stats on an unpartitioned table: %s",
+ tableName_));
+ } else if (partitionSet_ != null) {
+ Preconditions.checkState(tableRef instanceof BaseTableRef);
+ partitionSet_.setPartitionShouldExist();
+ partitionSet_.analyze(analyzer);
}
// For incremental stats, estimate the size of intermediate stats and report an
// error if the estimate is greater than --inc_stats_size_limit_bytes in bytes
@@ -305,7 +304,7 @@ public class ComputeStatsStmt extends StatementBase {
// incremental computation.
List<String> filterPreds = Lists.newArrayList();
if (isIncremental_) {
- if (partitionSpec_ == null) {
+ if (partitionSet_ == null) {
// If any column does not have stats, we recompute statistics for all partitions
// TODO: need a better way to invalidate stats for all partitions, so that we can
// use this logic to only recompute new / changed columns.
@@ -355,23 +354,23 @@ public class ComputeStatsStmt extends StatementBase {
expectAllPartitions_ = true;
}
} else {
- // Always compute stats on a particular partition when told to.
+ List<HdfsPartition> targetPartitions = partitionSet_.getPartitions();
+
+ // Always compute stats on a set of partitions when told to.
List<String> partitionConjuncts = Lists.newArrayList();
- for (PartitionKeyValue kv: partitionSpec_.getPartitionSpecKeyValues()) {
- partitionConjuncts.add(kv.toPredicateSql());
+ for (HdfsPartition targetPartition : targetPartitions) {
+ partitionConjuncts.add(targetPartition.getConjunctSql());
+ List<String> partValues = Lists.newArrayList();
+ for (LiteralExpr partValue: targetPartition.getPartitionValues()) {
+ partValues.add(PartitionKeyValue.getPartitionKeyValueString(partValue,
+ "NULL"));
+ }
+ expectedPartitions_.add(partValues);
}
filterPreds.add("(" + Joiner.on(" AND ").join(partitionConjuncts) +
")");
- HdfsPartition targetPartition =
- hdfsTable.getPartition(partitionSpec_.getPartitionSpecKeyValues());
- List<String> partValues = Lists.newArrayList();
- for (LiteralExpr partValue: targetPartition.getPartitionValues()) {
- partValues.add(PartitionKeyValue.getPartitionKeyValueString(partValue,
- "NULL"));
- }
- expectedPartitions_.add(partValues);
- for (HdfsPartition p: hdfsTable.getPartitions()) {
+ for (HdfsPartition p : hdfsTable.getPartitions()) {
if (p.isDefaultPartition()) continue;
- if (p == targetPartition) continue;
+ if (targetPartitions.contains(p)) continue;
TPartitionStats partStats = p.getPartitionStats();
if (partStats != null) validPartStats_.add(partStats);
}
@@ -408,7 +407,7 @@ public class ComputeStatsStmt extends StatementBase {
tableStatsSelectList.addAll(partitionColsSelectList);
tableStatsQueryBuilder.append(Joiner.on(", ").join(tableStatsSelectList));
- tableStatsQueryBuilder.append(" FROM " + sqlTableName);
+ tableStatsQueryBuilder.append(" FROM " + tableName_.toSql());
// Query for getting the per-column NDVs and number of NULLs.
List<String> columnStatsSelectList =
getBaseColumnStatsQuerySelectList(analyzer);
@@ -417,7 +416,7 @@ public class ComputeStatsStmt extends StatementBase {
StringBuilder columnStatsQueryBuilder = new StringBuilder("SELECT ");
columnStatsQueryBuilder.append(Joiner.on(",
").join(columnStatsSelectList));
- columnStatsQueryBuilder.append(" FROM " + sqlTableName);
+ columnStatsQueryBuilder.append(" FROM " + tableName_.toSql());
// Add the WHERE clause to filter out partitions that we don't want to compute
// incremental stats for. While this is a win in most situations, we would like to
@@ -426,7 +425,7 @@ public class ComputeStatsStmt extends StatementBase {
// selected in) and there is no partition spec (so no single partition was explicitly
// selected in).
if (filterPreds.size() > 0 &&
- (validPartStats_.size() > 0 || partitionSpec_ != null)) {
+ (validPartStats_.size() > 0 || partitionSet_ != null)) {
String filterClause = " WHERE " + Joiner.on(" OR ").join(filterPreds);
columnStatsQueryBuilder.append(filterClause);
tableStatsQueryBuilder.append(filterClause);
@@ -529,7 +528,7 @@ public class ComputeStatsStmt extends StatementBase {
return "COMPUTE STATS " + tableName_.toSql();
} else {
return "COMPUTE INCREMENTAL STATS " + tableName_.toSql() +
- partitionSpec_ == null ? "" : partitionSpec_.toSql();
+ partitionSet_ == null ? "" : partitionSet_.toSql();
}
}
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/628685ae/fe/src/main/java/org/apache/impala/analysis/DropStatsStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/DropStatsStmt.java b/fe/src/main/java/org/apache/impala/analysis/DropStatsStmt.java
index fe2ff84..e39071f 100644
--- a/fe/src/main/java/org/apache/impala/analysis/DropStatsStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/DropStatsStmt.java
@@ -18,10 +18,10 @@
package org.apache.impala.analysis;
import org.apache.impala.authorization.Privilege;
-import org.apache.impala.catalog.Table;
import org.apache.impala.common.AnalysisException;
import org.apache.impala.thrift.TDropStatsParams;
import org.apache.impala.thrift.TTableName;
+
import com.google.common.base.Preconditions;
/**
@@ -31,28 +31,29 @@ import com.google.common.base.Preconditions;
public class DropStatsStmt extends StatementBase {
protected final TableName tableName_;
- // If non-null, only drop the statistics for a given partition
- PartitionSpec partitionSpec_ = null;
-
// Set during analysis
- protected String dbName_;
+ protected TableRef tableRef_;
+
+ // If non-null, only drop the statistics for a given partition
+ private final PartitionSet partitionSet_;
/**
* Constructor for building the DROP TABLE/VIEW statement
*/
public DropStatsStmt(TableName tableName) {
this.tableName_ = tableName;
+ this.partitionSet_ = null;
}
- public DropStatsStmt(TableName tableName, PartitionSpec partSpec) {
+ public DropStatsStmt(TableName tableName, PartitionSet partitionSet) {
this.tableName_ = tableName;
- this.partitionSpec_ = partSpec;
+ this.partitionSet_ = partitionSet;
}
@Override
public String toSql() {
StringBuilder sb = new StringBuilder("DROP ");
- if (partitionSpec_ == null) {
+ if (partitionSet_ == null) {
sb.append(" STATS ");
if (tableName_.getDb() != null) sb.append(tableName_.getDb() + ".");
sb.append(tableName_.toSql());
@@ -60,7 +61,7 @@ public class DropStatsStmt extends StatementBase {
sb.append(" INCREMENTAL STATS ");
if (tableName_.getDb() != null) sb.append(tableName_.getDb() + ".");
sb.append(tableName_.toSql());
- sb.append(partitionSpec_.toSql());
+ sb.append(partitionSet_.toSql());
}
return sb.toString();
}
@@ -68,9 +69,8 @@ public class DropStatsStmt extends StatementBase {
public TDropStatsParams toThrift() {
TDropStatsParams params = new TDropStatsParams();
params.setTable_name(new TTableName(getDb(), getTbl()));
-
- if (partitionSpec_ != null) {
- params.setPartition_spec(partitionSpec_.toThrift());
+ if (partitionSet_ != null) {
+ params.setPartition_set(partitionSet_.toThrift());
}
return params;
}
@@ -81,14 +81,25 @@ public class DropStatsStmt extends StatementBase {
*/
@Override
public void analyze(Analyzer analyzer) throws AnalysisException {
- dbName_ = analyzer.getTargetDbName(tableName_);
- Table table = analyzer.getTable(tableName_, Privilege.ALTER);
- Preconditions.checkNotNull(table);
- if (partitionSpec_ != null) {
- partitionSpec_.setTableName(tableName_);
- partitionSpec_.setPrivilegeRequirement(Privilege.ALTER);
- partitionSpec_.setPartitionShouldExist();
- partitionSpec_.analyze(analyzer);
+ // Resolve and analyze table ref to register privilege and audit events
+ // and to allow us to evaluate partition predicates.
+ tableRef_ = new TableRef(tableName_.toPath(), null, Privilege.ALTER);
+ tableRef_ = analyzer.resolveTableRef(tableRef_);
+ Preconditions.checkNotNull(tableRef_);
+ if (tableRef_ instanceof InlineViewRef) {
+ throw new AnalysisException(
+ String.format("DROP STATS not allowed on a view: %s", tableName_));
+ }
+ if (tableRef_ instanceof CollectionTableRef) {
+ throw new AnalysisException(
+ String.format("DROP STATS not allowed on a nested collection: %s",
tableName_));
+ }
+ tableRef_.analyze(analyzer);
+ if (partitionSet_ != null) {
+ partitionSet_.setTableName(tableRef_.getTable().getTableName());
+ partitionSet_.setPrivilegeRequirement(Privilege.ALTER);
+ partitionSet_.setPartitionShouldExist();
+ partitionSet_.analyze(analyzer);
}
}
@@ -97,8 +108,8 @@ public class DropStatsStmt extends StatementBase {
* the target drop table resides in.
*/
public String getDb() {
- Preconditions.checkNotNull(dbName_);
- return dbName_;
+ Preconditions.checkNotNull(tableRef_);
+ return tableRef_.getTable().getDb().getName();
}
public String getTbl() { return tableName_.getTbl(); }
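
With the PartitionSet plumbing above, DROP INCREMENTAL STATS now accepts general
partition predicates. A hypothetical sketch against the table p from the commit
message (behavior summarized in comments, not copied from a real run; some_view
is an assumed view name):

  drop incremental stats p partition (j < 2 and k != "b");
  -- Drops incremental stats on every matching partition. A predicate that
  -- matches nothing succeeds, since general exprs imply IF EXISTS semantics.
  drop stats some_view;
  -- Rejected by the new analysis check:
  -- "DROP STATS not allowed on a view: some_view"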
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/628685ae/fe/src/main/java/org/apache/impala/analysis/InlineViewRef.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/InlineViewRef.java b/fe/src/main/java/org/apache/impala/analysis/InlineViewRef.java
index 3ee2070..42f9400 100644
--- a/fe/src/main/java/org/apache/impala/analysis/InlineViewRef.java
+++ b/fe/src/main/java/org/apache/impala/analysis/InlineViewRef.java
@@ -93,7 +93,8 @@ public class InlineViewRef extends TableRef {
* C'tor for creating inline views that replace a local or catalog view ref.
*/
public InlineViewRef(View view, TableRef origTblRef) {
- super(view.getTableName().toPath(), origTblRef.getExplicitAlias());
+ super(view.getTableName().toPath(), origTblRef.getExplicitAlias(),
+ origTblRef.getPrivilege());
queryStmt_ = view.getQueryStmt().clone();
queryStmt_.reset();
if (view.isLocalView()) queryStmt_.reset();
@@ -142,7 +143,7 @@ public class InlineViewRef extends TableRef {
// Catalog views refs require special analysis settings for authorization.
boolean isCatalogView = (view_ != null && !view_.isLocalView());
if (isCatalogView) {
- analyzer.registerAuthAndAuditEvent(view_, analyzer);
+ analyzer.registerAuthAndAuditEvent(view_, priv_);
if (inlineViewAnalyzer_.isExplain()) {
// If the user does not have privileges on the view's definition
// then we report a masked authorization error so as not to reveal
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/628685ae/fe/src/main/java/org/apache/impala/analysis/PartitionSet.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/PartitionSet.java b/fe/src/main/java/org/apache/impala/analysis/PartitionSet.java
new file mode 100644
index 0000000..b41f2f9
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/analysis/PartitionSet.java
@@ -0,0 +1,199 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package org.apache.impala.analysis;
+
+import java.util.List;
+import java.util.Set;
+
+import org.apache.impala.analysis.BinaryPredicate.Operator;
+import org.apache.impala.catalog.Column;
+import org.apache.impala.catalog.HdfsPartition;
+import org.apache.impala.catalog.Table;
+import org.apache.impala.common.AnalysisException;
+import org.apache.impala.common.InternalException;
+import org.apache.impala.common.Reference;
+import org.apache.impala.planner.HdfsPartitionPruner;
+import org.apache.impala.thrift.TPartitionKeyValue;
+import com.google.common.base.Joiner;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Sets;
+
+/**
+ * Represents a set of partitions resulting from evaluating a list of partition conjuncts
+ * against a table's partition list.
+ */
+public class PartitionSet extends PartitionSpecBase {
+ private final List<Expr> partitionExprs_;
+
+ // Result of analysis
+ private List<HdfsPartition> partitions_ = Lists.newArrayList();
+
+ public PartitionSet(List<Expr> partitionExprs) {
+ this.partitionExprs_ = ImmutableList.copyOf(partitionExprs);
+ }
+
+ public List<HdfsPartition> getPartitions() { return partitions_; }
+
+ @Override
+ public void analyze(Analyzer analyzer) throws AnalysisException {
+ super.analyze(analyzer);
+ List<Expr> conjuncts = Lists.newArrayList();
+ // Do not register column-authorization requests.
+ analyzer.setEnablePrivChecks(false);
+ for (Expr e: partitionExprs_) {
+ e.analyze(analyzer);
+ e.checkReturnsBool("Partition expr", false);
+ conjuncts.addAll(e.getConjuncts());
+ }
+
+ TupleDescriptor desc = analyzer.getDescriptor(tableName_.toString());
+ List<SlotId> partitionSlots = desc.getPartitionSlots();
+ for (Expr e: conjuncts) {
+ e.foldConstantChildren(analyzer);
+ // Make sure there are no constant predicates in the partition exprs.
+ if (e.isConstant()) {
+      throw new AnalysisException(String.format("Invalid partition expr %s. A " +
+          "partition spec may not contain constant predicates.", e.toSql()));
+ }
+
+ // Make sure every conjunct only contains partition slot refs.
+      if (!e.isBoundBySlotIds(partitionSlots)) {
+        throw new AnalysisException("Partition exprs cannot contain non-partition " +
+          "column(s): " + e.toSql() + ".");
+      }
+ }
+
+    List<Expr> transformedConjuncts = transformPartitionConjuncts(analyzer, conjuncts);
+ addIfExists(analyzer, table_, transformedConjuncts);
+
+ try {
+ HdfsPartitionPruner pruner = new HdfsPartitionPruner(desc);
+      partitions_ = pruner.prunePartitions(analyzer, transformedConjuncts, true);
+ } catch (InternalException e) {
+ throw new AnalysisException("Partition expr evaluation failed in the
backend.", e);
+ }
+
+ if (partitionShouldExist_ != null) {
+ if (partitionShouldExist_ && partitions_.isEmpty()) {
+ throw new AnalysisException("No matching partition(s) found.");
+ } else if (!partitionShouldExist_ && !partitions_.isEmpty()) {
+ throw new AnalysisException("Partition already exists.");
+ }
+ }
+ analyzer.setEnablePrivChecks(true);
+ }
+
+  // Check if we should add IF EXISTS. For backwards compatibility, fully-specified
+  // partition specs don't add it, while more general partition expressions or partially
+  // specified partition specs add IF EXISTS by setting partitionShouldExist_ to null.
+ // The given conjuncts are assumed to only reference partition columns.
+ private void addIfExists(
+ Analyzer analyzer, Table table, List<Expr> conjuncts) {
+ boolean add = false;
+ Set<String> partColNames = Sets.newHashSet();
+ Reference<SlotRef> slotRef = new Reference<>();
+ for (Expr e : conjuncts) {
+ if (e instanceof BinaryPredicate) {
+ BinaryPredicate bp = (BinaryPredicate) e;
+        if (bp.getOp() != Operator.EQ || !bp.isSingleColumnPredicate(slotRef, null)) {
+ add = true;
+ break;
+ }
+ Column partColumn = slotRef.getRef().getDesc().getColumn();
+ Preconditions.checkState(table.isClusteringColumn(partColumn));
+ partColNames.add(partColumn.getName());
+ } else if (e instanceof IsNullPredicate) {
+ IsNullPredicate nullPredicate = (IsNullPredicate) e;
+ Column partColumn = nullPredicate.getBoundSlot().getDesc().getColumn();
+ Preconditions.checkState(table.isClusteringColumn(partColumn));
+ partColNames.add(partColumn.getName());
+ } else {
+ add = true;
+ break;
+ }
+ }
+
+ if (add || partColNames.size() < table.getNumClusteringCols()) {
+ partitionShouldExist_ = null;
+ }
+ }
+
+  // Transform <COL> = NULL into an IsNull expr; <String COL> = '' into an IsNull expr
+  // and <String COL> = 'String Value' into lower case.
+  // The reason is that COL = NULL is allowed for selecting the NULL
+  // partition, but a COL = NULL predicate can never be true, so we
+  // need to transform such predicates before feeding them into the
+  // partition pruner. The same logic applies to the string transformations.
+  private List<Expr> transformPartitionConjuncts(Analyzer analyzer, List<Expr> conjuncts)
+ throws AnalysisException {
+ List<Expr> transformedConjuncts = Lists.newArrayList();
+ for (Expr e : conjuncts) {
+ Expr result = e;
+ if (e instanceof BinaryPredicate) {
+ BinaryPredicate bp = ((BinaryPredicate) e);
+ if (bp.getOp() == Operator.EQ) {
+ SlotRef leftChild =
+            bp.getChild(0) instanceof SlotRef ? ((SlotRef) bp.getChild(0)) : null;
+ NullLiteral nullChild = bp.getChild(1) instanceof NullLiteral ?
+ ((NullLiteral) bp.getChild(1)) : null;
+ StringLiteral stringChild = bp.getChild(1) instanceof StringLiteral ?
+ ((StringLiteral) bp.getChild(1)) : null;
+ if (leftChild != null && nullChild != null) {
+ result = new IsNullPredicate(leftChild, false);
+ } else if (leftChild != null && stringChild != null) {
+ if (stringChild.getStringValue().isEmpty()) {
+ result = new IsNullPredicate(leftChild, false);
+ } else {
+            stringChild = new StringLiteral(stringChild.getStringValue().toLowerCase());
+ result.setChild(1, stringChild);
+ }
+ }
+ }
+ }
+ result.analyze(analyzer);
+ transformedConjuncts.add(result);
+ }
+ return transformedConjuncts;
+ }
+
+ public List<List<TPartitionKeyValue>> toThrift() {
+ List<List<TPartitionKeyValue>> thriftPartitionSet = Lists.newArrayList();
+ for (HdfsPartition hdfsPartition : partitions_) {
+ List<TPartitionKeyValue> thriftPartitionSpec = Lists.newArrayList();
+ for (int i = 0; i < table_.getNumClusteringCols(); ++i) {
+ String key = table_.getColumns().get(i).getName();
+ String value = PartitionKeyValue.getPartitionKeyValueString(
+ hdfsPartition.getPartitionValue(i), nullPartitionKeyValue_);
+ thriftPartitionSpec.add(new TPartitionKeyValue(key, value));
+ }
+ thriftPartitionSet.add(thriftPartitionSpec);
+ }
+ return thriftPartitionSet;
+ }
+
+ @Override
+ public String toSql() {
+ List<String> partitionExprStr = Lists.newArrayList();
+ for (Expr e : partitionExprs_) {
+ partitionExprStr.add(e.toSql());
+ }
+ return String.format("PARTITION (%s)", Joiner.on(",
").join(partitionExprStr));
+ }
+}
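
To make transformPartitionConjuncts() concrete, here is an illustrative sketch
of the rewrites it performs (predicates against table p; matching behavior
inferred from the code above, not from a real run):

  alter table p drop partition (k = NULL);  -- rewritten to k IS NULL so the
                                            -- NULL partition can be selected
  alter table p drop partition (k = "");    -- the empty string also becomes
                                            -- k IS NULL
  alter table p drop partition (k = "C");   -- string literal lowercased to "c"
                                            -- before partition pruning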
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/628685ae/fe/src/main/java/org/apache/impala/analysis/PartitionSpec.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/PartitionSpec.java b/fe/src/main/java/org/apache/impala/analysis/PartitionSpec.java
index 39cc1bb..b89e586 100644
--- a/fe/src/main/java/org/apache/impala/analysis/PartitionSpec.java
+++ b/fe/src/main/java/org/apache/impala/analysis/PartitionSpec.java
@@ -38,20 +38,13 @@ import com.google.common.collect.Sets;
/*
* Represents a partition spec - a collection of partition key/values.
*/
-public class PartitionSpec implements ParseNode {
+public class PartitionSpec extends PartitionSpecBase {
private final ImmutableList<PartitionKeyValue> partitionSpec_;
- private TableName tableName_;
- private Boolean partitionShouldExist_;
- private Privilege privilegeRequirement_;
// Flag to determine if the partition already exists in the target table.
// Set during analysis.
private Boolean partitionExists_;
- // The value Hive is configured to use for NULL partition key values.
- // Set during analysis.
- private String nullPartitionKeyValue_;
-
public PartitionSpec(List<PartitionKeyValue> partitionSpec) {
this.partitionSpec_ = ImmutableList.copyOf(partitionSpec);
}
@@ -60,48 +53,14 @@ public class PartitionSpec implements ParseNode {
return partitionSpec_;
}
- public String getTbl() { return tableName_.getTbl(); }
-  public void setTableName(TableName tableName) { this.tableName_ = tableName; }
public boolean partitionExists() {
Preconditions.checkNotNull(partitionExists_);
return partitionExists_;
}
- // The value Hive is configured to use for NULL partition key values.
- // Set during analysis.
- public String getNullPartitionKeyValue() {
- Preconditions.checkNotNull(nullPartitionKeyValue_);
- return nullPartitionKeyValue_;
- }
-
-  // If set, an additional analysis check will be performed to validate the target table
-  // contains the given partition spec.
-  public void setPartitionShouldExist() { partitionShouldExist_ = Boolean.TRUE; }
-
-  // If set, an additional analysis check will be performed to validate the target table
-  // does not contain the given partition spec.
-  public void setPartitionShouldNotExist() { partitionShouldExist_ = Boolean.FALSE; }
-
-  // Set the privilege requirement for this partition spec. Must be set prior to
- // analysis.
- public void setPrivilegeRequirement(Privilege privilege) {
- privilegeRequirement_ = privilege;
- }
-
@Override
public void analyze(Analyzer analyzer) throws AnalysisException {
- Preconditions.checkNotNull(tableName_);
- Preconditions.checkNotNull(privilegeRequirement_);
-
-    // Skip adding an audit event when analyzing partitions. The parent table should
- // be audited outside of the PartitionSpec.
- Table table = analyzer.getTable(tableName_, privilegeRequirement_, false);
- String tableName = table.getDb().getName() + "." + getTbl();
-
- // Make sure the target table is partitioned.
- if (table.getMetaStoreTable().getPartitionKeysSize() == 0) {
- throw new AnalysisException("Table is not partitioned: " + tableName);
- }
+ super.analyze(analyzer);
// Make sure static partition key values only contain constant exprs.
for (PartitionKeyValue kv: partitionSpec_) {
@@ -110,7 +69,7 @@ public class PartitionSpec implements ParseNode {
// Get all keys in the target table.
Set<String> targetPartitionKeys = Sets.newHashSet();
- for (FieldSchema fs: table.getMetaStoreTable().getPartitionKeys()) {
+ for (FieldSchema fs: table_.getMetaStoreTable().getPartitionKeys()) {
targetPartitionKeys.add(fs.getName().toLowerCase());
}
@@ -118,7 +77,7 @@ public class PartitionSpec implements ParseNode {
if (targetPartitionKeys.size() != partitionSpec_.size()) {
throw new AnalysisException(String.format("Items in partition spec must
exactly " +
"match the partition columns in the table definition: %s (%d vs %d)",
- tableName, partitionSpec_.size(), targetPartitionKeys.size()));
+ tableName_, partitionSpec_.size(), targetPartitionKeys.size()));
}
Set<String> keyNames = Sets.newHashSet();
@@ -130,14 +89,14 @@ public class PartitionSpec implements ParseNode {
throw new AnalysisException("Duplicate partition key name: " +
pk.getColName());
}
- Column c = table.getColumn(pk.getColName());
+ Column c = table_.getColumn(pk.getColName());
if (c == null) {
throw new AnalysisException(String.format(
- "Partition column '%s' not found in table: %s", pk.getColName(),
tableName));
+ "Partition column '%s' not found in table: %s", pk.getColName(),
tableName_));
} else if (!targetPartitionKeys.contains(pk.getColName().toLowerCase())) {
throw new AnalysisException(String.format(
"Column '%s' is not a partition column in table: %s",
- pk.getColName(), tableName));
+ pk.getColName(), tableName_));
} else if (pk.getValue() instanceof NullLiteral) {
// No need for further analysis checks of this partition key value.
continue;
@@ -160,12 +119,7 @@ public class PartitionSpec implements ParseNode {
pk.getValue().toSql(), colType.toString(), pk.getColName()));
}
}
- // Only HDFS tables are partitioned.
- Preconditions.checkState(table instanceof HdfsTable);
- HdfsTable hdfsTable = (HdfsTable) table;
- nullPartitionKeyValue_ = hdfsTable.getNullPartitionKeyValue();
-
- partitionExists_ = hdfsTable.getPartition(partitionSpec_) != null;
+ partitionExists_ = table_.getPartition(partitionSpec_) != null;
if (partitionShouldExist_ != null) {
if (partitionShouldExist_ && !partitionExists_) {
throw new AnalysisException("Partition spec does not exist: (" +
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/628685ae/fe/src/main/java/org/apache/impala/analysis/PartitionSpecBase.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/PartitionSpecBase.java b/fe/src/main/java/org/apache/impala/analysis/PartitionSpecBase.java
new file mode 100644
index 0000000..7d9a800
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/analysis/PartitionSpecBase.java
@@ -0,0 +1,94 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package org.apache.impala.analysis;
+
+import org.apache.impala.authorization.Privilege;
+import org.apache.impala.catalog.Table;
+import org.apache.impala.catalog.HdfsTable;
+import org.apache.impala.common.AnalysisException;
+import com.google.common.base.Preconditions;
+
+/**
+ * Base class for PartitionSpec and PartitionSet containing the partition
+ * specifications of related DDL operations.
+ */
+public abstract class PartitionSpecBase implements ParseNode {
+ protected HdfsTable table_;
+ protected TableName tableName_;
+ protected Boolean partitionShouldExist_;
+ protected Privilege privilegeRequirement_;
+ // The value Hive is configured to use for NULL partition key values.
+ // Set during analysis.
+ protected String nullPartitionKeyValue_;
+
+ public String getTbl() { return tableName_.getTbl(); }
+
+  public void setTableName(TableName tableName) { this.tableName_ = tableName; }
+
+ // The value Hive is configured to use for NULL partition key values.
+ // Set during analysis.
+ public String getNullPartitionKeyValue() {
+ Preconditions.checkNotNull(nullPartitionKeyValue_);
+ return nullPartitionKeyValue_;
+ }
+
+  // If set, an additional analysis check will be performed to validate the target table
+  // contains the given partition spec.
+  public void setPartitionShouldExist() { partitionShouldExist_ = Boolean.TRUE; }
+
+  // If set, an additional analysis check will be performed to validate the target table
+  // does not contain the given partition spec.
+  public void setPartitionShouldNotExist() { partitionShouldExist_ = Boolean.FALSE; }
+
+ public boolean getPartitionShouldExist() {
+ return partitionShouldExist_ != null && partitionShouldExist_;
+ }
+
+ public boolean getPartitionShouldNotExist() {
+ return partitionShouldExist_ != null && !partitionShouldExist_;
+ }
+
+  // Set the privilege requirement for this partition spec. Must be set prior to
+ // analysis.
+ public void setPrivilegeRequirement(Privilege privilege) {
+ privilegeRequirement_ = privilege;
+ }
+
+ @Override
+ public void analyze(Analyzer analyzer) throws AnalysisException {
+ Preconditions.checkNotNull(tableName_);
+ Preconditions.checkNotNull(privilegeRequirement_);
+
+    // Skip adding an audit event when analyzing partitions. The parent table should
+ // be audited outside of the PartitionSpec.
+ Table table = analyzer.getTable(tableName_, privilegeRequirement_, false);
+
+ // Make sure the target table is partitioned.
+ if (table.getMetaStoreTable().getPartitionKeysSize() == 0) {
+ throw new AnalysisException("Table is not partitioned: " + tableName_);
+ }
+
+ // Only HDFS tables are partitioned.
+ Preconditions.checkState(table instanceof HdfsTable);
+ table_ = (HdfsTable) table;
+ nullPartitionKeyValue_ = table_.getNullPartitionKeyValue();
+ }
+
+ @Override
+ public abstract String toSql();
+}
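
Since the not-partitioned check now lives in PartitionSpecBase.analyze(), both
PartitionSpec and PartitionSet fail identically on unpartitioned tables. An
illustrative sketch (np is an assumed table name; exact error layout assumed):

  create table np (i int);
  alter table np drop partition (i < 2);
  -- AnalysisException: Table is not partitioned: np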
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/628685ae/fe/src/main/java/org/apache/impala/analysis/ShowFilesStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/ShowFilesStmt.java b/fe/src/main/java/org/apache/impala/analysis/ShowFilesStmt.java
index bec1ed3..d102bb9 100644
--- a/fe/src/main/java/org/apache/impala/analysis/ShowFilesStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/ShowFilesStmt.java
@@ -27,8 +27,6 @@ import org.apache.impala.catalog.Table;
import org.apache.impala.catalog.HdfsTable;
import org.apache.impala.thrift.TTableName;
import com.google.common.base.Preconditions;
-import com.google.common.base.Joiner;
-import com.google.common.collect.Lists;
/**
* Representation of a SHOW FILES statement.
@@ -38,52 +36,60 @@ import com.google.common.collect.Lists;
*
*/
public class ShowFilesStmt extends StatementBase {
- private TableName tableName_;
+ private final TableName tableName_;
// Show files for all the partitions if this is null.
- private final PartitionSpec partitionSpec_;
+ private final PartitionSet partitionSet_;
// Set during analysis.
protected Table table_;
- public ShowFilesStmt(TableName tableName, PartitionSpec partitionSpec) {
+ public ShowFilesStmt(TableName tableName, PartitionSet partitionSet) {
this.tableName_ = tableName;
- this.partitionSpec_ = partitionSpec;
+ this.partitionSet_ = partitionSet;
}
@Override
public String toSql() {
StringBuilder strBuilder = new StringBuilder();
strBuilder.append("SHOW FILES IN " + tableName_.toString());
-    if (partitionSpec_ != null) strBuilder.append(" " + partitionSpec_.toSql());
+ if (partitionSet_ != null) strBuilder.append(" " + partitionSet_.toSql());
return strBuilder.toString();
}
@Override
public void analyze(Analyzer analyzer) throws AnalysisException {
- if (!tableName_.isFullyQualified()) {
- tableName_ = new TableName(analyzer.getDefaultDb(), tableName_.getTbl());
+ // Resolve and analyze table ref to register privilege and audit events
+ // and to allow us to evaluate partition predicates.
+    TableRef tableRef = new TableRef(tableName_.toPath(), null, Privilege.VIEW_METADATA);
+ tableRef = analyzer.resolveTableRef(tableRef);
+ if (tableRef instanceof InlineViewRef ||
+ tableRef instanceof CollectionTableRef) {
+ throw new AnalysisException(String.format(
+ "SHOW FILES not applicable to a non hdfs table: %s", tableName_));
}
- table_ = analyzer.getTable(tableName_, Privilege.VIEW_METADATA);
+ table_ = tableRef.getTable();
+ Preconditions.checkNotNull(table_);
if (!(table_ instanceof HdfsTable)) {
throw new AnalysisException(String.format(
- "SHOW FILES not applicable to a non hdfs table: %s",
table_.getFullName()));
+ "SHOW FILES not applicable to a non hdfs table: %s", tableName_));
}
+ tableRef.analyze(analyzer);
// Analyze the partition spec, if one was specified.
- if (partitionSpec_ != null) {
- partitionSpec_.setTableName(tableName_);
- partitionSpec_.setPartitionShouldExist();
- partitionSpec_.setPrivilegeRequirement(Privilege.VIEW_METADATA);
- partitionSpec_.analyze(analyzer);
+ if (partitionSet_ != null) {
+ partitionSet_.setTableName(table_.getTableName());
+ partitionSet_.setPartitionShouldExist();
+ partitionSet_.setPrivilegeRequirement(Privilege.VIEW_METADATA);
+ partitionSet_.analyze(analyzer);
}
}
public TShowFilesParams toThrift() {
TShowFilesParams params = new TShowFilesParams();
-    params.setTable_name(new TTableName(tableName_.getDb(), tableName_.getTbl()));
- if (partitionSpec_ != null) {
- params.setPartition_spec(partitionSpec_.toThrift());
+    params.setTable_name(new TTableName(table_.getDb().getName(), table_.getName()));
+ if (partitionSet_ != null) {
+ params.setPartition_set(partitionSet_.toThrift());
}
return params;
}
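
Resolving the table ref up front lets SHOW FILES reject views and nested
collections with the same message used for other non-HDFS tables. A
hypothetical session (v2 is an assumed view name):

  create view v2 as select i from p;
  show files in v2;
  -- AnalysisException: SHOW FILES not applicable to a non hdfs table: v2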
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/628685ae/fe/src/main/java/org/apache/impala/analysis/TableRef.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/TableRef.java b/fe/src/main/java/org/apache/impala/analysis/TableRef.java
index 6403091..d6bbfd2 100644
--- a/fe/src/main/java/org/apache/impala/analysis/TableRef.java
+++ b/fe/src/main/java/org/apache/impala/analysis/TableRef.java
@@ -22,6 +22,7 @@ import java.util.Collections;
import java.util.List;
import java.util.Set;
+import org.apache.impala.authorization.Privilege;
import org.apache.impala.catalog.HdfsTable;
import org.apache.impala.catalog.Table;
import org.apache.impala.common.AnalysisException;
@@ -77,6 +78,9 @@ public class TableRef implements ParseNode {
// Indicates whether this table ref is given an explicit alias.
protected boolean hasExplicitAlias_;
+  // Analysis registers privilege and/or audit requests based on this privilege.
+ protected final Privilege priv_;
+
protected JoinOperator joinOp_;
protected ArrayList<String> joinHints_;
protected List<String> usingColNames_;
@@ -125,7 +129,10 @@ public class TableRef implements ParseNode {
/////////////////////////////////////////
public TableRef(List<String> path, String alias) {
- super();
+ this(path, alias, Privilege.SELECT);
+ }
+
+ public TableRef(List<String> path, String alias, Privilege priv) {
rawPath_ = path;
if (alias != null) {
aliases_ = new String[] { alias.toLowerCase() };
@@ -133,6 +140,7 @@ public class TableRef implements ParseNode {
} else {
hasExplicitAlias_ = false;
}
+ priv_ = priv;
isAnalyzed_ = false;
replicaPreference_ = null;
randomReplica_ = false;
@@ -146,6 +154,7 @@ public class TableRef implements ParseNode {
resolvedPath_ = other.resolvedPath_;
aliases_ = other.aliases_;
hasExplicitAlias_ = other.hasExplicitAlias_;
+ priv_ = other.priv_;
joinOp_ = other.joinOp_;
joinHints_ =
(other.joinHints_ != null) ? Lists.newArrayList(other.joinHints_) : null;
@@ -252,6 +261,7 @@ public class TableRef implements ParseNode {
Preconditions.checkNotNull(resolvedPath_);
return resolvedPath_.getRootTable();
}
+ public Privilege getPrivilege() { return priv_; }
public ArrayList<String> getJoinHints() { return joinHints_; }
public ArrayList<String> getTableHints() { return tableHints_; }
public Expr getOnClause() { return onClause_; }