IMPALA-2890: Support ALTER TABLE statements for Kudu tables
With this commit, we add support for additional ALTER TABLE statements
against Kudu tables. The newly supported ALTER TABLE operations for Kudu tables
are (examples follow the list):
- ADD/DROP range partitions. Syntax:
ALTER TABLE <tbl_name> ADD [IF NOT EXISTS] RANGE <kudu_partition_spec>
ALTER TABLE <tbl_name> DROP [IF EXISTS] RANGE <kudu_partition_spec>
- ADD/DROP/RENAME column. Syntax:
ALTER TABLE <tbl_name> ADD COLUMNS (col_spec [, col_spec ...])
ALTER TABLE <tbl_name> DROP COLUMN <col_name>
ALTER TABLE <tbl_name> CHANGE COLUMN <old> <new_name> <type>
- Rename the underlying Kudu table using the 'kudu.table_name' table property.
Example: ALTER TABLE <tbl_name> SET TBLPROPERTIES ('kudu.table_name'='<new_name>')
changes the underlying Kudu table name to <new_name>.
- Renaming the HMS/Catalog table entry of a Kudu table is supported using the
existing ALTER TABLE <tbl_name> RENAME TO <new_tbl_name> syntax.
Not supported:
- ALTER TABLE <tbl_name> REPLACE COLUMNS
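For illustration, a sketch of the new statements against a hypothetical Kudu
table 'events' with an INT range-partitioned column (table and column names
below are examples only, not part of this change):
  -- Add/drop range partitions
  ALTER TABLE events ADD IF NOT EXISTS RANGE PARTITION 10 <= VALUES < 20
  ALTER TABLE events DROP IF EXISTS RANGE PARTITION VALUE = 30
  -- Add/drop/rename columns
  ALTER TABLE events ADD COLUMNS (note STRING NULL, cost BIGINT NOT NULL DEFAULT 0)
  ALTER TABLE events DROP COLUMN note
  ALTER TABLE events CHANGE COLUMN cost total_cost BIGINT
  -- Rename the underlying Kudu table and the HMS/Catalog entry
  ALTER TABLE events SET TBLPROPERTIES ('kudu.table_name'='new_events')
  ALTER TABLE events RENAME TO archived_events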
Change-Id: I04bc87e04e05da5cc03edec79d13cedfd2012896
Reviewed-on: http://gerrit.cloudera.org:8080/5136
Reviewed-by: Dimitris Tsirogiannis <[email protected]>
Tested-by: Internal Jenkins
Project: http://git-wip-us.apache.org/repos/asf/incubator-impala/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-impala/commit/9f497ba0
Tree: http://git-wip-us.apache.org/repos/asf/incubator-impala/tree/9f497ba0
Diff: http://git-wip-us.apache.org/repos/asf/incubator-impala/diff/9f497ba0
Branch: refs/heads/master
Commit: 9f497ba02f2c5147ddfa5be62a47f2bb45ac97af
Parents: 90bf40d
Author: Dimitris Tsirogiannis <[email protected]>
Authored: Thu Nov 17 23:06:02 2016 -0800
Committer: Internal Jenkins <[email protected]>
Committed: Wed Nov 30 04:55:03 2016 +0000
----------------------------------------------------------------------
common/thrift/JniCatalog.thrift | 26 +-
fe/src/main/cup/sql-parser.cup | 13 +
.../AlterTableAddDropRangePartitionStmt.java | 111 +++++++
.../analysis/AlterTableAddPartitionStmt.java | 17 +-
.../analysis/AlterTableAddReplaceColsStmt.java | 28 ++
.../analysis/AlterTableChangeColStmt.java | 19 +-
.../analysis/AlterTableDropPartitionStmt.java | 9 +-
.../analysis/AlterTableSetFileFormatStmt.java | 7 +
.../analysis/AlterTableSetLocationStmt.java | 4 +
.../apache/impala/analysis/AlterTableStmt.java | 12 +-
.../org/apache/impala/analysis/ColumnDef.java | 17 +-
.../apache/impala/analysis/DistributeParam.java | 3 +-
.../org/apache/impala/analysis/ToSqlUtils.java | 3 +
.../org/apache/impala/catalog/KuduTable.java | 54 +++-
.../impala/service/CatalogOpExecutor.java | 81 ++++-
.../impala/service/KuduCatalogOpExecutor.java | 175 +++++++++-
.../apache/impala/analysis/AnalyzeDDLTest.java | 77 ++++-
.../org/apache/impala/analysis/ParserTest.java | 36 +++
.../queries/QueryTest/kudu_alter.test | 317 ++++++++++++++++++-
tests/query_test/test_kudu.py | 13 +
20 files changed, 964 insertions(+), 58 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/9f497ba0/common/thrift/JniCatalog.thrift
----------------------------------------------------------------------
diff --git a/common/thrift/JniCatalog.thrift b/common/thrift/JniCatalog.thrift
index 7fa7f7b..8bb07e6 100644
--- a/common/thrift/JniCatalog.thrift
+++ b/common/thrift/JniCatalog.thrift
@@ -58,6 +58,7 @@ enum TDdlType {
enum TAlterTableType {
ADD_REPLACE_COLUMNS,
ADD_PARTITION,
+ ADD_DROP_RANGE_PARTITION,
CHANGE_COLUMN,
DROP_COLUMN,
DROP_PARTITION,
@@ -176,16 +177,34 @@ struct TAlterTableAddPartitionParams {
1: required list<CatalogObjects.TPartitionKeyValue> partition_spec
// If true, no error is raised if a partition with the same spec already exists.
- 3: required bool if_not_exists
+ 2: required bool if_not_exists
// Optional HDFS storage location for the Partition. If not specified the
// default storage location is used.
- 2: optional string location
+ 3: optional string location
// Optional caching operation to perform on the newly added partition.
4: optional THdfsCachingOp cache_op
}
+enum TRangePartitionOperationType {
+ ADD,
+ DROP
+}
+
+// Parameters for ALTER TABLE ADD/DROP RANGE PARTITION command
+struct TAlterTableAddDropRangePartitionParams {
+ // Range partition to add/drop
+ 1: required CatalogObjects.TRangePartition range_partition_spec
+
+ // If true, ignore errors raised while adding/dropping a range
+ // partition
+ 2: required bool ignore_errors
+
+ // Operation
+ 3: required TRangePartitionOperationType type
+}
+
// Parameters for ALTER TABLE DROP COLUMN commands.
struct TAlterTableDropColParams {
// Column name to drop.
@@ -319,6 +338,9 @@ struct TAlterTableParams {
// Parameters for ALTER TABLE SET CACHED|UNCACHED
13: optional TAlterTableSetCachedParams set_cached_params
+
+ // Parameters for ALTER TABLE ADD/DROP RANGE PARTITION
+ 14: optional TAlterTableAddDropRangePartitionParams add_drop_range_partition_params
}
// Parameters of CREATE TABLE LIKE commands
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/9f497ba0/fe/src/main/cup/sql-parser.cup
----------------------------------------------------------------------
diff --git a/fe/src/main/cup/sql-parser.cup b/fe/src/main/cup/sql-parser.cup
index 2fc765d..a375c75 100644
--- a/fe/src/main/cup/sql-parser.cup
+++ b/fe/src/main/cup/sql-parser.cup
@@ -33,6 +33,7 @@ import org.apache.impala.analysis.ColumnDef.Option;
import org.apache.impala.analysis.UnionStmt.Qualifier;
import org.apache.impala.analysis.UnionStmt.UnionOperand;
import org.apache.impala.analysis.RangePartition;
+import org.apache.impala.analysis.AlterTableAddDropRangePartitionStmt.Operation;
import org.apache.impala.catalog.ArrayType;
import org.apache.impala.catalog.MapType;
import org.apache.impala.catalog.RowFormat;
@@ -921,6 +922,12 @@ alter_tbl_stmt ::=
:}
| KW_ALTER KW_TABLE table_name:table KW_DROP opt_kw_column
ident_or_default:col_name
{: RESULT = new AlterTableDropColStmt(table, col_name); :}
+ | KW_ALTER KW_TABLE table_name:table KW_ADD if_not_exists_val:if_not_exists
+ KW_RANGE range_param:partition
+ {:
+ RESULT = new AlterTableAddDropRangePartitionStmt(table, partition, if_not_exists,
+ Operation.ADD);
+ :}
| KW_ALTER KW_TABLE table_name:table KW_CHANGE opt_kw_column
ident_or_default:col_name
column_def:col_def
{: RESULT = new AlterTableChangeColStmt(table, col_name, col_def); :}
@@ -930,6 +937,12 @@ alter_tbl_stmt ::=
| KW_ALTER KW_TABLE table_name:table opt_partition_set:partitions KW_SET
KW_FILEFORMAT
file_format_val:file_format
{: RESULT = new AlterTableSetFileFormatStmt(table, partitions, file_format);
:}
+ | KW_ALTER KW_TABLE table_name:table KW_DROP if_exists_val:if_exists
+ KW_RANGE range_param:partition
+ {:
+ RESULT = new AlterTableAddDropRangePartitionStmt(table, partition, if_exists,
+ Operation.DROP);
+ :}
| KW_ALTER KW_TABLE table_name:table opt_partition_set:partitions KW_SET
KW_LOCATION STRING_LITERAL:location
{:
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/9f497ba0/fe/src/main/java/org/apache/impala/analysis/AlterTableAddDropRangePartitionStmt.java
----------------------------------------------------------------------
diff --git
a/fe/src/main/java/org/apache/impala/analysis/AlterTableAddDropRangePartitionStmt.java
b/fe/src/main/java/org/apache/impala/analysis/AlterTableAddDropRangePartitionStmt.java
new file mode 100644
index 0000000..b1618e0
--- /dev/null
+++
b/fe/src/main/java/org/apache/impala/analysis/AlterTableAddDropRangePartitionStmt.java
@@ -0,0 +1,111 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package org.apache.impala.analysis;
+
+import java.util.List;
+
+import org.apache.impala.catalog.Column;
+import org.apache.impala.catalog.KuduTable;
+import org.apache.impala.catalog.Table;
+import org.apache.impala.common.AnalysisException;
+import org.apache.impala.thrift.TAlterTableAddDropRangePartitionParams;
+import org.apache.impala.thrift.TAlterTableParams;
+import org.apache.impala.thrift.TAlterTableType;
+import org.apache.impala.thrift.TRangePartitionOperationType;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Lists;
+
+/**
+ * Represents an ALTER TABLE ADD/DROP RANGE PARTITION statement.
+ */
+public class AlterTableAddDropRangePartitionStmt extends AlterTableStmt {
+ private final boolean ignoreErrors_;
+ private final RangePartition rangePartitionSpec_;
+
+ public enum Operation {
+ ADD("IF NOT EXISTS", TRangePartitionOperationType.ADD),
+ DROP("IF EXISTS", TRangePartitionOperationType.DROP);
+
+ private final String option_;
+ private final TRangePartitionOperationType type_;
+ Operation(String option, TRangePartitionOperationType type) {
+ option_ = option;
+ type_ = type;
+ }
+ String option() { return option_; }
+ TRangePartitionOperationType type() { return type_; }
+ }
+
+ private final Operation operation_;
+
+ public AlterTableAddDropRangePartitionStmt(TableName tableName,
+ RangePartition rangePartitionSpec, boolean ignoreErrors, Operation op) {
+ super(tableName);
+ Preconditions.checkNotNull(rangePartitionSpec);
+ rangePartitionSpec_ = rangePartitionSpec;
+ ignoreErrors_ = ignoreErrors;
+ operation_ = op;
+ }
+
+ @Override
+ public String toSql() {
+ StringBuilder sb = new StringBuilder("ALTER TABLE " + getTbl());
+ sb.append(" " + operation_.name());
+ if (ignoreErrors_) sb.append(" " + operation_.option());
+ sb.append(" " + rangePartitionSpec_.toSql());
+ return sb.toString();
+ }
+
+ @Override
+ public TAlterTableParams toThrift() {
+ TAlterTableParams params = super.toThrift();
+ params.setAlter_type(TAlterTableType.ADD_DROP_RANGE_PARTITION);
+ TAlterTableAddDropRangePartitionParams partParams =
+ new TAlterTableAddDropRangePartitionParams();
+ partParams.setRange_partition_spec(rangePartitionSpec_.toThrift());
+ partParams.setIgnore_errors(ignoreErrors_);
+ partParams.setType(operation_.type());
+ params.setAdd_drop_range_partition_params(partParams);
+ return params;
+ }
+
+ @Override
+ public void analyze(Analyzer analyzer) throws AnalysisException {
+ super.analyze(analyzer);
+ Table table = getTargetTable();
+ if (!(table instanceof KuduTable)) {
+ throw new AnalysisException(String.format("Table %s does not support
range " +
+ "partitions: RANGE %s", table.getFullName(),
rangePartitionSpec_.toSql()));
+ }
+ KuduTable kuduTable = (KuduTable) table;
+ List<String> colNames = kuduTable.getRangeDistributionColNames();
+ if (colNames.isEmpty()) {
+ throw new AnalysisException(String.format("Cannot add/drop partition %s:
" +
+ "Kudu table %s doesn't have a range-based distribution.",
+ rangePartitionSpec_.toSql(), kuduTable.getName()));
+ }
+ List<ColumnDef> rangeColDefs = Lists.newArrayListWithCapacity(colNames.size());
+ for (String colName: colNames) {
+ Column col = kuduTable.getColumn(colName);
+ ColumnDef colDef = new ColumnDef(col.getName(), new TypeDef(col.getType()));
+ colDef.analyze(analyzer);
+ rangeColDefs.add(colDef);
+ }
+ rangePartitionSpec_.analyze(analyzer, rangeColDefs);
+ }
+}
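To illustrate the analysis rules implemented above, a sketch with assumed table
names (a range-partitioned Kudu table 'events' and a non-Kudu table 'logs'):
  -- Accepted: the target is a Kudu table with a range-based distribution
  ALTER TABLE events ADD IF NOT EXISTS RANGE PARTITION 10 <= VALUES < 20
  -- Rejected at analysis: the target table does not support range partitions
  ALTER TABLE logs ADD RANGE PARTITION 10 <= VALUES < 20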
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/9f497ba0/fe/src/main/java/org/apache/impala/analysis/AlterTableAddPartitionStmt.java
----------------------------------------------------------------------
diff --git
a/fe/src/main/java/org/apache/impala/analysis/AlterTableAddPartitionStmt.java
b/fe/src/main/java/org/apache/impala/analysis/AlterTableAddPartitionStmt.java
index 4b5fbb4..b946436 100644
---
a/fe/src/main/java/org/apache/impala/analysis/AlterTableAddPartitionStmt.java
+++
b/fe/src/main/java/org/apache/impala/analysis/AlterTableAddPartitionStmt.java
@@ -19,15 +19,17 @@ package org.apache.impala.analysis;
import org.apache.impala.authorization.Privilege;
import org.apache.impala.catalog.HdfsTable;
+import org.apache.impala.catalog.KuduTable;
import org.apache.impala.catalog.Table;
import org.apache.impala.common.AnalysisException;
import org.apache.impala.common.FileSystemUtil;
import org.apache.impala.thrift.TAlterTableAddPartitionParams;
import org.apache.impala.thrift.TAlterTableParams;
import org.apache.impala.thrift.TAlterTableType;
-import com.google.common.base.Preconditions;
import org.apache.hadoop.fs.permission.FsAction;
+import com.google.common.base.Preconditions;
+
/**
* Represents an ALTER TABLE ADD PARTITION statement.
*/
@@ -41,7 +43,7 @@ public class AlterTableAddPartitionStmt extends
AlterTableStmt {
PartitionSpec partitionSpec, HdfsUri location, boolean ifNotExists,
HdfsCachingOp cacheOp) {
super(tableName);
- Preconditions.checkState(partitionSpec != null);
+ Preconditions.checkNotNull(partitionSpec);
location_ = location;
ifNotExists_ = ifNotExists;
partitionSpec_ = partitionSpec;
@@ -60,9 +62,7 @@ public class AlterTableAddPartitionStmt extends
AlterTableStmt {
sb.append("IF NOT EXISTS ");
}
sb.append(" " + partitionSpec_.toSql());
- if (location_ != null) {
- sb.append(String.format(" LOCATION '%s'", location_));
- }
+ if (location_ != null) sb.append(String.format(" LOCATION '%s'", location_));
if (cacheOp_ != null) sb.append(cacheOp_.toSql());
return sb.toString();
}
@@ -83,16 +83,19 @@ public class AlterTableAddPartitionStmt extends
AlterTableStmt {
@Override
public void analyze(Analyzer analyzer) throws AnalysisException {
super.analyze(analyzer);
+ Table table = getTargetTable();
+ if (table instanceof KuduTable) {
+ throw new AnalysisException("ALTER TABLE ADD PARTITION is not supported
for " +
+ "Kudu tables: " + partitionSpec_.toSql());
+ }
if (!ifNotExists_) partitionSpec_.setPartitionShouldNotExist();
partitionSpec_.setPrivilegeRequirement(Privilege.ALTER);
partitionSpec_.analyze(analyzer);
-
if (location_ != null) {
location_.analyze(analyzer, Privilege.ALL, FsAction.READ_WRITE);
}
boolean shouldCache = false;
- Table table = getTargetTable();
if (cacheOp_ != null) {
cacheOp_.analyze(analyzer);
shouldCache = cacheOp_.shouldCache();
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/9f497ba0/fe/src/main/java/org/apache/impala/analysis/AlterTableAddReplaceColsStmt.java
----------------------------------------------------------------------
diff --git
a/fe/src/main/java/org/apache/impala/analysis/AlterTableAddReplaceColsStmt.java
b/fe/src/main/java/org/apache/impala/analysis/AlterTableAddReplaceColsStmt.java
index feda138..9b2d7c0 100644
---
a/fe/src/main/java/org/apache/impala/analysis/AlterTableAddReplaceColsStmt.java
+++
b/fe/src/main/java/org/apache/impala/analysis/AlterTableAddReplaceColsStmt.java
@@ -24,6 +24,7 @@ import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.impala.catalog.Column;
import org.apache.impala.catalog.HBaseTable;
+import org.apache.impala.catalog.KuduTable;
import org.apache.impala.catalog.Table;
import org.apache.impala.common.AnalysisException;
import org.apache.impala.thrift.TAlterTableAddReplaceColsParams;
@@ -79,6 +80,12 @@ public class AlterTableAddReplaceColsStmt extends
AlterTableStmt {
"supported on HBase tables.");
}
+ boolean isKuduTable = t instanceof KuduTable;
+ if (isKuduTable && replaceExistingCols_) {
+ throw new AnalysisException("ALTER TABLE REPLACE COLUMNS is not " +
+ "supported on Kudu tables.");
+ }
+
// Build a set of the partition keys for the table.
Set<String> existingPartitionKeys = Sets.newHashSet();
for (FieldSchema fs: t.getMetaStoreTable().getPartitionKeys()) {
@@ -103,6 +110,27 @@ public class AlterTableAddReplaceColsStmt extends
AlterTableStmt {
} else if (!colNames.add(colName)) {
throw new AnalysisException("Duplicate column name: " + colName);
}
+
+ if (isKuduTable) {
+ if (c.getType().isComplexType()) {
+ throw new AnalysisException("Kudu tables do not support complex
types: " +
+ c.toString());
+ }
+ if (c.isPrimaryKey()) {
+ throw new AnalysisException("Cannot add a primary key using an ALTER
TABLE " +
+ "ADD COLUMNS statement: " + c.toString());
+ }
+ if (c.hasEncoding() || c.hasCompression() || c.hasBlockSize()) {
+ // Kudu API doesn't support specifying encoding, compression and desired
+ // block size on a newly added column (see KUDU-1746).
+ throw new AnalysisException("ENCODING, COMPRESSION and " +
+ "BLOCK_SIZE options cannot be specified in an ALTER TABLE ADD COLUMNS " +
+ "statement: " + c.toString());
+ }
+ } else if (c.hasKuduOptions()) {
+ throw new AnalysisException("The specified column options are only
supported " +
+ "in Kudu tables: " + c.toString());
+ }
}
}
}
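To illustrate the Kudu-specific ADD COLUMNS checks above, a sketch against an
assumed Kudu table 'events' (names are illustrative only):
  -- Accepted: nullable columns, and non-nullable columns with a default value
  ALTER TABLE events ADD COLUMNS (note STRING NULL, cost BIGINT NOT NULL DEFAULT 0)
  -- Rejected: primary keys and ENCODING/COMPRESSION/BLOCK_SIZE options
  ALTER TABLE events ADD COLUMNS (id2 INT PRIMARY KEY)
  ALTER TABLE events ADD COLUMNS (flag INT ENCODING RLE)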
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/9f497ba0/fe/src/main/java/org/apache/impala/analysis/AlterTableChangeColStmt.java
----------------------------------------------------------------------
diff --git
a/fe/src/main/java/org/apache/impala/analysis/AlterTableChangeColStmt.java
b/fe/src/main/java/org/apache/impala/analysis/AlterTableChangeColStmt.java
index 5c4bfee..e5d1b20 100644
--- a/fe/src/main/java/org/apache/impala/analysis/AlterTableChangeColStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/AlterTableChangeColStmt.java
@@ -18,14 +18,17 @@
package org.apache.impala.analysis;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
-
+import org.apache.impala.catalog.Column;
import org.apache.impala.catalog.HBaseTable;
+import org.apache.impala.catalog.KuduTable;
import org.apache.impala.catalog.Table;
import org.apache.impala.common.AnalysisException;
import org.apache.impala.thrift.TAlterTableChangeColParams;
import org.apache.impala.thrift.TAlterTableParams;
import org.apache.impala.thrift.TAlterTableType;
+
import com.google.common.base.Preconditions;
+import com.google.common.base.Strings;
/**
* Represents an ALTER TABLE CHANGE COLUMN colName newColDef statement.
@@ -40,7 +43,7 @@ public class AlterTableChangeColStmt extends AlterTableStmt {
ColumnDef newColDef) {
super(tableName);
Preconditions.checkNotNull(newColDef);
- Preconditions.checkState(colName != null && !colName.isEmpty());
+ Preconditions.checkState(!Strings.isNullOrEmpty(colName));
colName_ = colName;
newColDef_ = newColDef;
}
@@ -97,5 +100,17 @@ public class AlterTableChangeColStmt extends AlterTableStmt
{
t.getColumn(newColDef_.getColName()) != null) {
throw new AnalysisException("Column already exists: " +
newColDef_.getColName());
}
+ if (newColDef_.hasKuduOptions()) {
+ throw new AnalysisException("Unsupported column options in ALTER TABLE
CHANGE " +
+ "COLUMN statement: " + newColDef_.toString());
+ }
+ if (t instanceof KuduTable) {
+ Column col = t.getColumn(colName_);
+ if (!col.getType().equals(newColDef_.getType())) {
+ throw new AnalysisException(String.format("Cannot change the type of a
Kudu " +
+ "column using an ALTER TABLE CHANGE COLUMN statement: (%s vs %s)",
+ col.getType().toSql(), newColDef_.getType().toSql()));
+ }
+ }
}
}
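A sketch of the resulting CHANGE COLUMN behavior for Kudu, assuming a table
'events' with an INT column 'zip' (names are illustrative only):
  -- Accepted: renaming a column while keeping its type
  ALTER TABLE events CHANGE COLUMN zip zip_code INT
  -- Rejected: changing the column type or specifying Kudu column options
  ALTER TABLE events CHANGE COLUMN zip zip BIGINT
  ALTER TABLE events CHANGE COLUMN zip zip_code INT ENCODING RLE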
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/9f497ba0/fe/src/main/java/org/apache/impala/analysis/AlterTableDropPartitionStmt.java
----------------------------------------------------------------------
diff --git
a/fe/src/main/java/org/apache/impala/analysis/AlterTableDropPartitionStmt.java
b/fe/src/main/java/org/apache/impala/analysis/AlterTableDropPartitionStmt.java
index 9eb580d..e183e80 100644
---
a/fe/src/main/java/org/apache/impala/analysis/AlterTableDropPartitionStmt.java
+++
b/fe/src/main/java/org/apache/impala/analysis/AlterTableDropPartitionStmt.java
@@ -18,6 +18,8 @@
package org.apache.impala.analysis;
import org.apache.impala.authorization.Privilege;
+import org.apache.impala.catalog.KuduTable;
+import org.apache.impala.catalog.Table;
import org.apache.impala.common.AnalysisException;
import org.apache.impala.thrift.TAlterTableDropPartitionParams;
import org.apache.impala.thrift.TAlterTableParams;
@@ -52,7 +54,7 @@ public class AlterTableDropPartitionStmt extends
AlterTableStmt {
StringBuilder sb = new StringBuilder("ALTER TABLE " + getTbl());
sb.append(" DROP ");
if (ifExists_) sb.append("IF EXISTS ");
- sb.append(" DROP " + partitionSet_.toSql());
+ sb.append(partitionSet_.toSql());
if (purgePartition_) sb.append(" PURGE");
return sb.toString();
}
@@ -72,6 +74,11 @@ public class AlterTableDropPartitionStmt extends
AlterTableStmt {
@Override
public void analyze(Analyzer analyzer) throws AnalysisException {
super.analyze(analyzer);
+ Table table = getTargetTable();
+ if (table instanceof KuduTable) {
+ throw new AnalysisException("ALTER TABLE DROP PARTITION is not supported
for " +
+ "Kudu tables: " + partitionSet_.toSql());
+ }
if (!ifExists_) partitionSet_.setPartitionShouldExist();
partitionSet_.setPrivilegeRequirement(Privilege.ALTER);
partitionSet_.analyze(analyzer);
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/9f497ba0/fe/src/main/java/org/apache/impala/analysis/AlterTableSetFileFormatStmt.java
----------------------------------------------------------------------
diff --git
a/fe/src/main/java/org/apache/impala/analysis/AlterTableSetFileFormatStmt.java
b/fe/src/main/java/org/apache/impala/analysis/AlterTableSetFileFormatStmt.java
index 5b53b80..36db614 100644
---
a/fe/src/main/java/org/apache/impala/analysis/AlterTableSetFileFormatStmt.java
+++
b/fe/src/main/java/org/apache/impala/analysis/AlterTableSetFileFormatStmt.java
@@ -17,6 +17,8 @@
package org.apache.impala.analysis;
+import org.apache.impala.catalog.Table;
+import org.apache.impala.catalog.KuduTable;
import org.apache.impala.common.AnalysisException;
import org.apache.impala.thrift.TAlterTableParams;
import org.apache.impala.thrift.TAlterTableSetFileFormatParams;
@@ -53,5 +55,10 @@ public class AlterTableSetFileFormatStmt extends
AlterTableSetStmt {
@Override
public void analyze(Analyzer analyzer) throws AnalysisException {
super.analyze(analyzer);
+ Table tbl = getTargetTable();
+ if (tbl instanceof KuduTable) {
+ throw new AnalysisException("ALTER TABLE SET FILEFORMAT is not supported
" +
+ "on Kudu tables: " + tbl.getFullName());
+ }
}
}
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/9f497ba0/fe/src/main/java/org/apache/impala/analysis/AlterTableSetLocationStmt.java
----------------------------------------------------------------------
diff --git
a/fe/src/main/java/org/apache/impala/analysis/AlterTableSetLocationStmt.java
b/fe/src/main/java/org/apache/impala/analysis/AlterTableSetLocationStmt.java
index d7a7448..f076312 100644
--- a/fe/src/main/java/org/apache/impala/analysis/AlterTableSetLocationStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/AlterTableSetLocationStmt.java
@@ -25,6 +25,7 @@ import org.apache.impala.authorization.Privilege;
import org.apache.impala.catalog.HdfsPartition;
import org.apache.impala.catalog.HdfsTable;
import org.apache.impala.catalog.Table;
+import org.apache.impala.catalog.KuduTable;
import org.apache.impala.common.AnalysisException;
import org.apache.impala.thrift.TAlterTableParams;
import org.apache.impala.thrift.TAlterTableSetLocationParams;
@@ -107,6 +108,9 @@ public class AlterTableSetLocationStmt extends
AlterTableSetStmt {
"uncache before changing the location using: ALTER TABLE %s SET
UNCACHED",
table.getFullName()));
}
+ } else if (table instanceof KuduTable) {
+ throw new AnalysisException("ALTER TABLE SET LOCATION is not supported
on Kudu " +
+ "tables: " + table.getFullName());
}
}
}
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/9f497ba0/fe/src/main/java/org/apache/impala/analysis/AlterTableStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/AlterTableStmt.java
b/fe/src/main/java/org/apache/impala/analysis/AlterTableStmt.java
index d86448b..967838e 100644
--- a/fe/src/main/java/org/apache/impala/analysis/AlterTableStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/AlterTableStmt.java
@@ -17,15 +17,18 @@
package org.apache.impala.analysis;
+import java.util.List;
+
import org.apache.impala.authorization.Privilege;
+import org.apache.impala.catalog.Column;
import org.apache.impala.catalog.DataSourceTable;
import org.apache.impala.catalog.KuduTable;
import org.apache.impala.catalog.Table;
-import org.apache.impala.catalog.View;
import org.apache.impala.common.AnalysisException;
import org.apache.impala.thrift.TAlterTableParams;
import org.apache.impala.thrift.TTableName;
import com.google.common.base.Preconditions;
+import com.google.common.collect.Lists;
/**
* Base class for all ALTER TABLE statements.
@@ -84,13 +87,6 @@ public abstract class AlterTableStmt extends StatementBase {
}
Preconditions.checkState(tableRef instanceof BaseTableRef);
table_ = tableRef.getTable();
- if (table_ instanceof KuduTable
- && !(this instanceof AlterTableSetTblProperties)
- && !(this instanceof AlterTableSetColumnStats)
- && !(this instanceof AlterTableOrViewRenameStmt)) {
- throw new AnalysisException(String.format(
- "ALTER TABLE not allowed on Kudu table: %s", tableName_));
- }
if (table_ instanceof DataSourceTable
&& !(this instanceof AlterTableSetColumnStats)) {
throw new AnalysisException(String.format(
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/9f497ba0/fe/src/main/java/org/apache/impala/analysis/ColumnDef.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/ColumnDef.java
b/fe/src/main/java/org/apache/impala/analysis/ColumnDef.java
index f65aa27..8993acb 100644
--- a/fe/src/main/java/org/apache/impala/analysis/ColumnDef.java
+++ b/fe/src/main/java/org/apache/impala/analysis/ColumnDef.java
@@ -155,10 +155,15 @@ public class ColumnDef {
public void setComment(String comment) { comment_ = comment; }
public String getComment() { return comment_; }
public boolean hasKuduOptions() {
- return isPrimaryKey_ || isNullable_ != null || encodingVal_ != null
- || compressionVal_ != null || defaultValue_ != null || blockSize_ != null;
+ return isPrimaryKey() || isNullabilitySet() || hasEncoding() || hasCompression()
+ || hasDefaultValue() || hasBlockSize();
}
- public boolean isNullable() { return isNullable_ != null && isNullable_; }
+ public boolean hasEncoding() { return encodingVal_ != null; }
+ public boolean hasCompression() { return compressionVal_ != null; }
+ public boolean hasBlockSize() { return blockSize_ != null; }
+ public boolean isNullabilitySet() { return isNullable_ != null; }
+ public boolean isNullable() { return isNullabilitySet() && isNullable_; }
+ public boolean hasDefaultValue() { return defaultValue_ != null; }
public void analyze(Analyzer analyzer) throws AnalysisException {
// Check whether the column name meets the Metastore's requirements.
@@ -269,15 +274,15 @@ public class ColumnDef {
public String toString() {
StringBuilder sb = new StringBuilder(colName_).append(" ");
if (type_ != null) {
- sb.append(type_);
+ sb.append(type_.toSql());
} else {
- sb.append(typeDef_);
+ sb.append(typeDef_.toSql());
}
if (isPrimaryKey_) sb.append(" PRIMARY KEY");
if (isNullable_ != null) sb.append(isNullable_ ? " NULL" : " NOT NULL");
if (encoding_ != null) sb.append(" ENCODING " + encoding_.toString());
if (compression_ != null) sb.append(" COMPRESSION " +
compression_.toString());
- if (defaultValue_ != null) sb.append(" DEFAULT_VALUE " + defaultValue_.toSql());
+ if (defaultValue_ != null) sb.append(" DEFAULT " + defaultValue_.toSql());
if (blockSize_ != null) sb.append(" BLOCK_SIZE " + blockSize_.toSql());
if (comment_ != null) sb.append(String.format(" COMMENT '%s'", comment_));
return sb.toString();
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/9f497ba0/fe/src/main/java/org/apache/impala/analysis/DistributeParam.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/DistributeParam.java
b/fe/src/main/java/org/apache/impala/analysis/DistributeParam.java
index 13fa6e6..0eb1329 100644
--- a/fe/src/main/java/org/apache/impala/analysis/DistributeParam.java
+++ b/fe/src/main/java/org/apache/impala/analysis/DistributeParam.java
@@ -27,6 +27,7 @@ import org.apache.impala.thrift.TDistributeByRangeParam;
import org.apache.impala.thrift.TDistributeParam;
import com.google.common.base.Joiner;
import com.google.common.base.Preconditions;
+import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
/**
@@ -200,7 +201,7 @@ public class DistributeParam implements ParseNode {
}
boolean hasColumnNames() { return !colNames_.isEmpty(); }
-
+ public List<String> getColumnNames() { return ImmutableList.copyOf(colNames_); }
void setColumnNames(Collection<String> colNames) {
Preconditions.checkState(colNames_.isEmpty());
colNames_.addAll(colNames);
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/9f497ba0/fe/src/main/java/org/apache/impala/analysis/ToSqlUtils.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/ToSqlUtils.java
b/fe/src/main/java/org/apache/impala/analysis/ToSqlUtils.java
index 3c0b850..4cd095c 100644
--- a/fe/src/main/java/org/apache/impala/analysis/ToSqlUtils.java
+++ b/fe/src/main/java/org/apache/impala/analysis/ToSqlUtils.java
@@ -223,6 +223,9 @@ public class ToSqlUtils {
paramsSql.add(param.toSql());
}
kuduDistributeByParams = Joiner.on(", ").join(paramsSql);
+ } else {
+ // We shouldn't output the columns for external tables
+ colsSql = null;
}
}
HdfsUri tableLocation = location == null ? null : new HdfsUri(location);
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/9f497ba0/fe/src/main/java/org/apache/impala/catalog/KuduTable.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/KuduTable.java
b/fe/src/main/java/org/apache/impala/catalog/KuduTable.java
index 0e88905..a7f72c3 100644
--- a/fe/src/main/java/org/apache/impala/catalog/KuduTable.java
+++ b/fe/src/main/java/org/apache/impala/catalog/KuduTable.java
@@ -18,6 +18,7 @@
package org.apache.impala.catalog;
import java.util.ArrayList;
+import java.util.Collections;
import java.util.List;
import java.util.Set;
@@ -46,6 +47,7 @@ import org.apache.impala.thrift.TTableType;
import org.apache.impala.util.KuduUtil;
import org.apache.impala.util.TResultRowBuilder;
import org.apache.kudu.ColumnSchema;
+import org.apache.kudu.Schema;
import org.apache.kudu.client.KuduClient;
import org.apache.kudu.client.KuduException;
import org.apache.kudu.client.LocatedTablet;
@@ -112,6 +114,9 @@ public class KuduTable extends Table {
// supported.
private final List<DistributeParam> distributeBy_ = Lists.newArrayList();
+ // Schema of the underlying Kudu table.
+ private org.apache.kudu.Schema kuduSchema_;
+
protected KuduTable(org.apache.hadoop.hive.metastore.api.Table msTable,
Db db, String name, String owner) {
super(msTable, db, name, owner);
@@ -137,6 +142,7 @@ public class KuduTable extends Table {
public String getKuduTableName() { return kuduTableName_; }
public String getKuduMasterHosts() { return kuduMasters_; }
+ public org.apache.kudu.Schema getKuduSchema() { return kuduSchema_; }
public List<String> getPrimaryKeyColumnNames() {
return ImmutableList.copyOf(primaryKeyColumnNames_);
@@ -147,6 +153,28 @@ public class KuduTable extends Table {
}
/**
+ * Returns the range-based distribution of this table if it exists, null otherwise.
+ */
+ private DistributeParam getRangeDistribution() {
+ for (DistributeParam distributeParam: distributeBy_) {
+ if (distributeParam.getType() == DistributeParam.Type.RANGE) {
+ return distributeParam;
+ }
+ }
+ return null;
+ }
+
+ /**
+ * Returns the column names of the table's range-based distribution or an empty
+ * list if the table doesn't have a range-based distribution.
+ */
+ public List<String> getRangeDistributionColNames() {
+ DistributeParam rangeDistribution = getRangeDistribution();
+ if (rangeDistribution == null) return Collections.<String>emptyList();
+ return rangeDistribution.getColumnNames();
+ }
+
+ /**
* Loads the metadata of a Kudu table.
*
* Schema and distribution schemes are loaded directly from Kudu whereas
column stats
@@ -214,7 +242,8 @@ public class KuduTable extends Table {
List<FieldSchema> cols = msTable_.getSd().getCols();
cols.clear();
int pos = 0;
- for (ColumnSchema colSchema: kuduTable.getSchema().getColumns()) {
+ kuduSchema_ = kuduTable.getSchema();
+ for (ColumnSchema colSchema: kuduSchema_.getColumns()) {
KuduColumn kuduCol = KuduColumn.fromColumnSchema(colSchema, pos);
Preconditions.checkNotNull(kuduCol);
// Add the HMS column
@@ -228,13 +257,14 @@ public class KuduTable extends Table {
private void loadDistributeByParams(org.apache.kudu.client.KuduTable
kuduTable) {
Preconditions.checkNotNull(kuduTable);
+ Schema tableSchema = kuduTable.getSchema();
PartitionSchema partitionSchema = kuduTable.getPartitionSchema();
Preconditions.checkState(!colsByPos_.isEmpty());
distributeBy_.clear();
for (HashBucketSchema hashBucketSchema:
partitionSchema.getHashBucketSchemas()) {
List<String> columnNames = Lists.newArrayList();
- for (int colPos: hashBucketSchema.getColumnIds()) {
- columnNames.add(colsByPos_.get(colPos).getName());
+ for (int colId: hashBucketSchema.getColumnIds()) {
+ columnNames.add(getColumnNameById(tableSchema, colId));
}
distributeBy_.add(
DistributeParam.createHashParam(columnNames,
hashBucketSchema.getNumBuckets()));
@@ -243,7 +273,7 @@ public class KuduTable extends Table {
List<Integer> columnIds = rangeSchema.getColumns();
if (columnIds.isEmpty()) return;
List<String> columnNames = Lists.newArrayList();
- for (int colPos: columnIds) columnNames.add(colsByPos_.get(colPos).getName());
+ for (int colId: columnIds) columnNames.add(getColumnNameById(tableSchema, colId));
// We don't populate the split values because Kudu's API doesn't currently
support
// retrieving the split values for range partitions.
// TODO: File a Kudu JIRA.
@@ -251,6 +281,16 @@ public class KuduTable extends Table {
}
/**
+ * Returns the name of a Kudu column with id 'colId'.
+ */
+ private String getColumnNameById(Schema tableSchema, int colId) {
+ Preconditions.checkNotNull(tableSchema);
+ ColumnSchema col = tableSchema.getColumnByIndex(tableSchema.getColumnIndex(colId));
+ Preconditions.checkNotNull(col);
+ return col.getName();
+ }
+
+ /**
* Creates a temporary KuduTable object populated with the specified
properties but has
* an invalid TableId and is not added to the Kudu storage engine or the
* HMS. This is used for CTAS statements.
@@ -342,6 +382,12 @@ public class KuduTable extends Table {
org.apache.kudu.client.KuduTable kuduTable =
client.openTable(kuduTableName_);
List<LocatedTablet> tablets =
kuduTable.getTabletsLocations(BackendConfig.INSTANCE.getKuduClientTimeoutMs());
+ if (tablets.isEmpty()) {
+ TResultRowBuilder builder = new TResultRowBuilder();
+ result.addToRows(
+ builder.add("-1").add("N/A").add("N/A").add("N/A").add("-1").get());
+ return result;
+ }
for (LocatedTablet tab: tablets) {
TResultRowBuilder builder = new TResultRowBuilder();
builder.add("-1"); // The Kudu client API doesn't expose tablet row
counts.
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/9f497ba0/fe/src/main/java/org/apache/impala/service/CatalogOpExecutor.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/service/CatalogOpExecutor.java
b/fe/src/main/java/org/apache/impala/service/CatalogOpExecutor.java
index 1755934..9748003 100644
--- a/fe/src/main/java/org/apache/impala/service/CatalogOpExecutor.java
+++ b/fe/src/main/java/org/apache/impala/service/CatalogOpExecutor.java
@@ -50,14 +50,6 @@ import org.apache.hadoop.hive.metastore.api.SerDeInfo;
import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
import org.apache.hadoop.hive.metastore.api.StringColumnStatsData;
import org.apache.impala.common.Reference;
-import org.apache.log4j.Logger;
-import org.apache.thrift.TException;
-import com.google.common.base.Joiner;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
-import com.google.common.collect.Sets;
-
import org.apache.impala.analysis.FunctionName;
import org.apache.impala.analysis.TableName;
import org.apache.impala.authorization.User;
@@ -96,6 +88,7 @@ import org.apache.impala.common.Pair;
import org.apache.impala.thrift.ImpalaInternalServiceConstants;
import org.apache.impala.thrift.JniCatalogConstants;
import org.apache.impala.thrift.TAlterTableAddPartitionParams;
+import org.apache.impala.thrift.TAlterTableAddDropRangePartitionParams;
import org.apache.impala.thrift.TAlterTableAddReplaceColsParams;
import org.apache.impala.thrift.TAlterTableChangeColParams;
import org.apache.impala.thrift.TAlterTableDropColParams;
@@ -367,6 +360,11 @@ public class CatalogOpExecutor {
long newCatalogVersion = catalog_.incrementAndGetCatalogVersion();
boolean reloadMetadata = true;
catalog_.getLock().writeLock().unlock();
+
+ if (tbl instanceof KuduTable && altersKuduTable(params.getAlter_type())) {
+ alterKuduTable(params, response, (KuduTable) tbl, newCatalogVersion);
+ return;
+ }
switch (params.getAlter_type()) {
case ADD_REPLACE_COLUMNS:
TAlterTableAddReplaceColsParams addReplaceColParams =
@@ -376,7 +374,8 @@ public class CatalogOpExecutor {
reloadTableSchema = true;
break;
case ADD_PARTITION:
- TAlterTableAddPartitionParams addPartParams = params.getAdd_partition_params();
+ TAlterTableAddPartitionParams addPartParams =
+ params.getAdd_partition_params();
// Create and add HdfsPartition object to the corresponding
HdfsTable and load
// its block metadata. Get the new table object with an updated
catalog
// version. If the partition already exists in Hive and
"IfNotExists" is true,
@@ -510,6 +509,55 @@ public class CatalogOpExecutor {
}
/**
+ * Returns true if the given alteration type changes the underlying table stored in
+ * Kudu in addition to the HMS table.
+ */
+ private boolean altersKuduTable(TAlterTableType type) {
+ return type == TAlterTableType.ADD_REPLACE_COLUMNS
+ || type == TAlterTableType.DROP_COLUMN
+ || type == TAlterTableType.CHANGE_COLUMN
+ || type == TAlterTableType.ADD_DROP_RANGE_PARTITION;
+ }
+
+ /**
+ * Executes the ALTER TABLE command for a Kudu table and reloads its metadata.
+ */
+ private void alterKuduTable(TAlterTableParams params, TDdlExecResponse response,
+ KuduTable tbl, long newCatalogVersion) throws ImpalaException {
+ Preconditions.checkState(Thread.holdsLock(tbl));
+ switch (params.getAlter_type()) {
+ case ADD_REPLACE_COLUMNS:
+ TAlterTableAddReplaceColsParams addReplaceColParams =
+ params.getAdd_replace_cols_params();
+ KuduCatalogOpExecutor.addColumn((KuduTable) tbl,
+ addReplaceColParams.getColumns());
+ break;
+ case DROP_COLUMN:
+ TAlterTableDropColParams dropColParams = params.getDrop_col_params();
+ KuduCatalogOpExecutor.dropColumn((KuduTable) tbl,
+ dropColParams.getCol_name());
+ break;
+ case CHANGE_COLUMN:
+ TAlterTableChangeColParams changeColParams = params.getChange_col_params();
+ KuduCatalogOpExecutor.renameColumn((KuduTable) tbl,
+ changeColParams.getCol_name(), changeColParams.getNew_col_def());
+ break;
+ case ADD_DROP_RANGE_PARTITION:
+ TAlterTableAddDropRangePartitionParams partParams =
+ params.getAdd_drop_range_partition_params();
+ KuduCatalogOpExecutor.addDropRangePartition((KuduTable) tbl, partParams);
+ break;
+ default:
+ throw new UnsupportedOperationException(
+ "Unsupported ALTER TABLE operation for Kudu tables: " +
+ params.getAlter_type());
+ }
+
+ loadTableMetadata(tbl, newCatalogVersion, true, true, null);
+ addTableToCatalogUpdate(tbl, response.result);
+ }
+
+ /**
* Loads the metadata of a table 'tbl' and assigns a new catalog version.
* reloadFileMetadata', 'reloadTableSchema', and 'partitionsToUpdate'
* are used only for HdfsTables and control which metadata to reload.
@@ -2144,9 +2192,22 @@ public class CatalogOpExecutor {
tbl.getMetaStoreTable().deepCopy();
switch (params.getTarget()) {
case TBL_PROPERTY:
- msTbl.getParameters().putAll(properties);
if (KuduTable.isKuduTable(msTbl)) {
+ // If 'kudu.table_name' is specified and this is a managed table, rename
+ // the underlying Kudu table.
+ if (properties.containsKey(KuduTable.KEY_TABLE_NAME)
+ && !properties.get(KuduTable.KEY_TABLE_NAME).equals(
+ msTbl.getParameters().get(KuduTable.KEY_TABLE_NAME))
+ && !Table.isExternalTable(msTbl)) {
+ KuduCatalogOpExecutor.renameTable((KuduTable) tbl,
+ properties.get(KuduTable.KEY_TABLE_NAME));
+ }
+ msTbl.getParameters().putAll(properties);
+ // Validate that the new table properties are valid and that
+ // the Kudu table is accessible.
KuduCatalogOpExecutor.validateKuduTblExists(msTbl);
+ } else {
+ msTbl.getParameters().putAll(properties);
}
break;
case SERDE_PROPERTY:
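A sketch of the rename behavior implemented above, assuming a managed Kudu
table 'events' (names are illustrative only):
  -- For a managed table, this also renames the table in the Kudu storage engine
  ALTER TABLE events SET TBLPROPERTIES ('kudu.table_name'='events_v2')
  -- The HMS/Catalog entry is renamed with the existing syntax; the underlying
  -- Kudu table name is controlled by the 'kudu.table_name' property
  ALTER TABLE events RENAME TO events_renamed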
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/9f497ba0/fe/src/main/java/org/apache/impala/service/KuduCatalogOpExecutor.java
----------------------------------------------------------------------
diff --git
a/fe/src/main/java/org/apache/impala/service/KuduCatalogOpExecutor.java
b/fe/src/main/java/org/apache/impala/service/KuduCatalogOpExecutor.java
index 068f426..82fcab8 100644
--- a/fe/src/main/java/org/apache/impala/service/KuduCatalogOpExecutor.java
+++ b/fe/src/main/java/org/apache/impala/service/KuduCatalogOpExecutor.java
@@ -31,22 +31,27 @@ import org.apache.impala.catalog.TableNotFoundException;
import org.apache.impala.catalog.Type;
import org.apache.impala.common.ImpalaRuntimeException;
import org.apache.impala.common.Pair;
+import org.apache.impala.thrift.TAlterTableAddDropRangePartitionParams;
import org.apache.impala.thrift.TColumn;
import org.apache.impala.thrift.TCreateTableParams;
import org.apache.impala.thrift.TDistributeParam;
import org.apache.impala.thrift.TRangePartition;
+import org.apache.impala.thrift.TRangePartitionOperationType;
import org.apache.impala.util.KuduUtil;
import org.apache.kudu.ColumnSchema;
import org.apache.kudu.ColumnSchema.ColumnSchemaBuilder;
import org.apache.kudu.Schema;
+import org.apache.kudu.client.AlterTableOptions;
import org.apache.kudu.client.CreateTableOptions;
import org.apache.kudu.client.KuduClient;
+import org.apache.kudu.client.KuduException;
import org.apache.kudu.client.PartialRow;
import org.apache.kudu.client.RangePartitionBound;
import org.apache.log4j.Logger;
import com.google.common.base.Preconditions;
import com.google.common.base.Strings;
+import com.google.common.collect.Lists;
/**
* This is a helper for the CatalogOpExecutor to provide Kudu related DDL
functionality
@@ -140,16 +145,11 @@ public class KuduCatalogOpExecutor {
tableOpts.setRangePartitionColumns(rangePartitionColumns);
for (TRangePartition rangePartition:
distParam.getBy_range_param().getRange_partitions()) {
- Preconditions.checkState(rangePartition.isSetLower_bound_values()
- || rangePartition.isSetUpper_bound_values());
- Pair<PartialRow, RangePartitionBound> lowerBound =
- KuduUtil.buildRangePartitionBound(schema,
rangePartitionColumns,
- rangePartition.getLower_bound_values(),
- rangePartition.isIs_lower_bound_inclusive());
- Pair<PartialRow, RangePartitionBound> upperBound =
- KuduUtil.buildRangePartitionBound(schema,
rangePartitionColumns,
- rangePartition.getUpper_bound_values(),
- rangePartition.isIs_upper_bound_inclusive());
+ List<Pair<PartialRow, RangePartitionBound>> rangeBounds =
+ getRangePartitionBounds(rangePartition, schema, rangePartitionColumns);
+ Preconditions.checkState(rangeBounds.size() == 2);
+ Pair<PartialRow, RangePartitionBound> lowerBound = rangeBounds.get(0);
+ Pair<PartialRow, RangePartitionBound> upperBound = rangeBounds.get(1);
tableOpts.addRangePartition(lowerBound.first, upperBound.first,
lowerBound.second, upperBound.second);
}
@@ -266,4 +266,159 @@ public class KuduCatalogOpExecutor {
"on master '%s'", kuduTableName, masterHosts), e);
}
}
+
+ /**
+ * Renames a Kudu table.
+ */
+ public static void renameTable(KuduTable tbl, String newName)
+ throws ImpalaRuntimeException {
+ Preconditions.checkState(!Strings.isNullOrEmpty(newName));
+ AlterTableOptions alterTableOptions = new AlterTableOptions();
+ alterTableOptions.renameTable(newName);
+ try (KuduClient client = KuduUtil.createKuduClient(tbl.getKuduMasterHosts())) {
+ client.alterTable(tbl.getKuduTableName(), alterTableOptions);
+ } catch (KuduException e) {
+ throw new ImpalaRuntimeException(String.format("Error renaming Kudu table " +
+ "%s to %s", tbl.getName(), newName), e);
+ }
+ }
+
+ /**
+ * Adds/drops a range partition.
+ */
+ public static void addDropRangePartition(KuduTable tbl,
+ TAlterTableAddDropRangePartitionParams params) throws ImpalaRuntimeException {
+ TRangePartition rangePartition = params.getRange_partition_spec();
+ List<Pair<PartialRow, RangePartitionBound>> rangeBounds =
+ getRangePartitionBounds(rangePartition, tbl);
+ Preconditions.checkState(rangeBounds.size() == 2);
+ Pair<PartialRow, RangePartitionBound> lowerBound = rangeBounds.get(0);
+ Pair<PartialRow, RangePartitionBound> upperBound = rangeBounds.get(1);
+ AlterTableOptions alterTableOptions = new AlterTableOptions();
+ TRangePartitionOperationType type = params.getType();
+ if (type == TRangePartitionOperationType.ADD) {
+ alterTableOptions.addRangePartition(lowerBound.first, upperBound.first,
+ lowerBound.second, upperBound.second);
+ } else {
+ alterTableOptions.dropRangePartition(lowerBound.first, upperBound.first,
+ lowerBound.second, upperBound.second);
+ }
+ try (KuduClient client = KuduUtil.createKuduClient(tbl.getKuduMasterHosts())) {
+ client.alterTable(tbl.getKuduTableName(), alterTableOptions);
+ } catch (KuduException e) {
+ if (!params.isIgnore_errors()) {
+ throw new ImpalaRuntimeException(String.format("Error %s range partition in " +
+ "table %s",
+ (type == TRangePartitionOperationType.ADD ? "adding" : "dropping"),
+ tbl.getName()), e);
+ }
+ }
+ }
+
+ private static List<Pair<PartialRow, RangePartitionBound>> getRangePartitionBounds(
+ TRangePartition rangePartition, KuduTable tbl) throws ImpalaRuntimeException {
+ return getRangePartitionBounds(rangePartition, tbl.getKuduSchema(),
+ tbl.getRangeDistributionColNames());
+ }
+
+ /**
+ * Returns the bounds of a range partition in two <PartialRow, RangePartitionBound>
+ * pairs to be used in Kudu API calls for ALTER and CREATE TABLE statements.
+ */
+ private static List<Pair<PartialRow, RangePartitionBound>> getRangePartitionBounds(
+ TRangePartition rangePartition, Schema schema,
+ List<String> rangeDistributionColNames) throws ImpalaRuntimeException {
+ Preconditions.checkNotNull(schema);
+ Preconditions.checkState(!rangeDistributionColNames.isEmpty());
+ Preconditions.checkState(rangePartition.isSetLower_bound_values()
+ || rangePartition.isSetUpper_bound_values());
+ List<Pair<PartialRow, RangePartitionBound>> rangeBounds =
+ Lists.newArrayListWithCapacity(2);
+ Pair<PartialRow, RangePartitionBound> lowerBound =
+ KuduUtil.buildRangePartitionBound(schema, rangeDistributionColNames,
+ rangePartition.getLower_bound_values(),
+ rangePartition.isIs_lower_bound_inclusive());
+ rangeBounds.add(lowerBound);
+ Pair<PartialRow, RangePartitionBound> upperBound =
+ KuduUtil.buildRangePartitionBound(schema, rangeDistributionColNames,
+ rangePartition.getUpper_bound_values(),
+ rangePartition.isIs_upper_bound_inclusive());
+ rangeBounds.add(upperBound);
+ return rangeBounds;
+ }
+
+ /**
+ * Adds a column to an existing Kudu table.
+ */
+ public static void addColumn(KuduTable tbl, List<TColumn> columns)
+ throws ImpalaRuntimeException {
+ AlterTableOptions alterTableOptions = new AlterTableOptions();
+ for (TColumn column: columns) {
+ Type type = Type.fromThrift(column.getColumnType());
+ Preconditions.checkState(type != null);
+ org.apache.kudu.Type kuduType = KuduUtil.fromImpalaType(type);
+ boolean isNullable = column.isSetIs_nullable() && column.isIs_nullable();
+ if (isNullable) {
+ if (column.isSetDefault_value()) {
+ // See KUDU-1747
+ throw new ImpalaRuntimeException(String.format("Error adding
nullable " +
+ "column to Kudu table %s. Cannot specify a default value for a
nullable " +
+ "column", tbl.getKuduTableName()));
+ }
+ alterTableOptions.addNullableColumn(column.getColumnName(), kuduType);
+ } else {
+ Object defaultValue = null;
+ if (column.isSetDefault_value()) {
+ defaultValue = KuduUtil.getKuduDefaultValue(column.getDefault_value(), kuduType,
+ column.getColumnName());
+ }
+ try {
+ alterTableOptions.addColumn(column.getColumnName(), kuduType, defaultValue);
+ } catch (IllegalArgumentException e) {
+ // TODO: Remove this when KUDU-1747 is fixed
+ throw new ImpalaRuntimeException("Error adding non-nullable column
to " +
+ "Kudu table " + tbl.getKuduTableName(), e);
+ }
+ }
+ }
+ try (KuduClient client = KuduUtil.createKuduClient(tbl.getKuduMasterHosts())) {
+ client.alterTable(tbl.getKuduTableName(), alterTableOptions);
+ } catch (KuduException e) {
+ throw new ImpalaRuntimeException("Error adding columns to Kudu table " +
+ tbl.getKuduTableName(), e);
+ }
+ }
+
+ /**
+ * Drops a column from a Kudu table.
+ */
+ public static void dropColumn(KuduTable tbl, String colName)
+ throws ImpalaRuntimeException {
+ Preconditions.checkState(!Strings.isNullOrEmpty(colName));
+ AlterTableOptions alterTableOptions = new AlterTableOptions();
+ alterTableOptions.dropColumn(colName);
+ try (KuduClient client = KuduUtil.createKuduClient(tbl.getKuduMasterHosts())) {
+ client.alterTable(tbl.getKuduTableName(), alterTableOptions);
+ } catch (KuduException e) {
+ throw new ImpalaRuntimeException(String.format("Error dropping column %s
from " +
+ "Kudu table %s", colName, tbl.getName()), e);
+ }
+ }
+
+ /**
+ * Changes the name of a column.
+ */
+ public static void renameColumn(KuduTable tbl, String oldName, TColumn newCol)
+ throws ImpalaRuntimeException {
+ Preconditions.checkState(!Strings.isNullOrEmpty(oldName));
+ Preconditions.checkNotNull(newCol);
+ AlterTableOptions alterTableOptions = new AlterTableOptions();
+ alterTableOptions.renameColumn(oldName, newCol.getColumnName());
+ try (KuduClient client = KuduUtil.createKuduClient(tbl.getKuduMasterHosts())) {
+ client.alterTable(tbl.getKuduTableName(), alterTableOptions);
+ } catch (KuduException e) {
+ throw new ImpalaRuntimeException(String.format("Error renaming column %s
to %s " +
+ "for Kudu table %s", oldName, newCol.getColumnName(),
tbl.getName()), e);
+ }
+ }
}
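For context, a rough mapping from the new executor entry points back to SQL,
following the switch in CatalogOpExecutor above (assumed table 'events'; names
are illustrative only):
  ALTER TABLE events ADD COLUMNS (note STRING NULL)       -- addColumn()
  ALTER TABLE events DROP COLUMN note                     -- dropColumn()
  ALTER TABLE events CHANGE COLUMN name full_name STRING  -- renameColumn()
  ALTER TABLE events ADD RANGE PARTITION VALUE = 10       -- addDropRangePartition()
  ALTER TABLE events SET TBLPROPERTIES
    ('kudu.table_name'='events_v2')                       -- renameTable()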
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/9f497ba0/fe/src/test/java/org/apache/impala/analysis/AnalyzeDDLTest.java
----------------------------------------------------------------------
diff --git a/fe/src/test/java/org/apache/impala/analysis/AnalyzeDDLTest.java
b/fe/src/test/java/org/apache/impala/analysis/AnalyzeDDLTest.java
index 94d1c1d..2a64b4c 100644
--- a/fe/src/test/java/org/apache/impala/analysis/AnalyzeDDLTest.java
+++ b/fe/src/test/java/org/apache/impala/analysis/AnalyzeDDLTest.java
@@ -1826,14 +1826,83 @@ public class AnalyzeDDLTest extends FrontendTestBase {
@Test
public void TestAlterKuduTable() {
TestUtils.assumeKuduIsSupported();
- // Alter table is not supported and should fail
- AnalysisError("ALTER TABLE functional_kudu.testtbl ADD COLUMNS (other
int)",
- "ALTER TABLE not allowed on Kudu table: functional_kudu.testtbl");
+ // ALTER TABLE ADD/DROP range partitions
+ String[] addDrop = {"add if not exists", "add", "drop if exists", "drop"};
+ for (String kw: addDrop) {
+ AnalyzesOk(String.format("alter table functional_kudu.testtbl %s range "
+
+ "partition 10 <= values < 20", kw));
+ AnalyzesOk(String.format("alter table functional_kudu.testtbl %s range "
+
+ "partition value = 30", kw));
+ AnalyzesOk(String.format("alter table functional_kudu.testtbl %s range "
+
+ "partition values < 100", kw));
+ AnalyzesOk(String.format("alter table functional_kudu.testtbl %s range "
+
+ "partition 10 <= values", kw));
+ AnalyzesOk(String.format("alter table functional_kudu.testtbl %s range "
+
+ "partition 1+1 <= values <= factorial(3)", kw));
+ AnalysisError(String.format("alter table functional.alltypes %s range " +
+ "partition 10 < values < 20", kw), "Table functional.alltypes does
not " +
+ "support range partitions: RANGE PARTITION 10 < VALUES < 20");
+ AnalysisError(String.format("alter table functional_kudu.testtbl %s
range " +
+ "partition values < isnull(null, null)", kw), "Range partition
values " +
+ "cannot be NULL. Range partition: 'PARTITION VALUES < isnull(NULL,
NULL)'");
+ }
- // Kudu tables can only be renamed or the table properties can be changed
+ // ALTER TABLE ADD COLUMNS
+ // Columns with different supported data types
+ AnalyzesOk("alter table functional_kudu.testtbl add columns (a1 tinyint
null, a2 " +
+ "smallint null, a3 int null, a4 bigint null, a5 string null, a6 float
null, " +
+ "a7 double null, a8 boolean null comment 'boolean')");
+ // Complex types
+ AnalysisError("alter table functional_kudu.testtbl add columns ( "+
+ "a struct<f1:int>)", "Kudu tables do not support complex types: " +
+ "a STRUCT<f1:INT>");
+ // Add primary key
+ AnalysisError("alter table functional_kudu.testtbl add columns (a int
primary key)",
+ "Cannot add a primary key using an ALTER TABLE ADD COLUMNS statement:
" +
+ "a INT PRIMARY KEY");
+ // Non-nullable columns require a default value
+ AnalyzesOk("alter table functional_kudu.testtbl add columns (a1 int not
null " +
+ "default 10)");
+ // Unsupported column options
+ String[] unsupportedColOptions = {"encoding rle", "compression lz4",
"block_size 10"};
+ for (String colOption: unsupportedColOptions) {
+ AnalysisError(String.format("alter table functional_kudu.testtbl add
columns " +
+ "(a1 int %s)", colOption), String.format("ENCODING, COMPRESSION and
" +
+ "BLOCK_SIZE options cannot be specified in an ALTER TABLE ADD
COLUMNS " +
+ "statement: a1 INT %s", colOption.toUpperCase()));
+ }
+ // REPLACE columns is not supported for Kudu tables
+ AnalysisError("alter table functional_kudu.testtbl replace columns (a int
null)",
+ "ALTER TABLE REPLACE COLUMNS is not supported on Kudu tables");
+ // Conflict with existing column
+ AnalysisError("alter table functional_kudu.testtbl add columns (zip int)",
+ "Column already exists: zip");
+ // Kudu column options on an HDFS table
+ AnalysisError("alter table functional.alltypes add columns (a int not
null)",
+ "The specified column options are only supported in Kudu tables: a INT
NOT NULL");
+
+ // ALTER TABLE DROP COLUMN
+ AnalyzesOk("alter table functional_kudu.testtbl drop column name");
+ AnalysisError("alter table functional_kudu.testtbl drop column no_col",
+ "Column 'no_col' does not exist in table: functional_kudu.testtbl");
+
+ // ALTER TABLE CHANGE COLUMN on Kudu tables
+ AnalyzesOk("alter table functional_kudu.testtbl change column name
new_name string");
+ // Unsupported column options
+ AnalysisError("alter table functional_kudu.testtbl change column zip
zip_code int " +
+ "encoding rle compression lz4 default 90000", "Unsupported column
options in " +
+ "ALTER TABLE CHANGE COLUMN statement: zip_code INT ENCODING RLE
COMPRESSION " +
+ "LZ4 DEFAULT 90000");
+ // Changing the column type is not supported for Kudu tables
+ AnalysisError("alter table functional_kudu.testtbl change column zip zip
bigint",
+ "Cannot change the type of a Kudu column using an ALTER TABLE CHANGE
COLUMN " +
+ "statement: (INT vs BIGINT)");
+
+ // Rename the underlying Kudu table
AnalyzesOk("ALTER TABLE functional_kudu.testtbl SET " +
"TBLPROPERTIES ('kudu.table_name' = 'Hans')");
+ // ALTER TABLE RENAME TO
AnalyzesOk("ALTER TABLE functional_kudu.testtbl RENAME TO new_testtbl");
}
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/9f497ba0/fe/src/test/java/org/apache/impala/analysis/ParserTest.java
----------------------------------------------------------------------
diff --git a/fe/src/test/java/org/apache/impala/analysis/ParserTest.java
b/fe/src/test/java/org/apache/impala/analysis/ParserTest.java
index 3cef4ff..6833ee9 100644
--- a/fe/src/test/java/org/apache/impala/analysis/ParserTest.java
+++ b/fe/src/test/java/org/apache/impala/analysis/ParserTest.java
@@ -1975,6 +1975,11 @@ public class ParserTest extends FrontendTestBase {
"ALTER TABLE TestDb.Foo %s COLUMNS (i int)", addReplace));
       ParsesOk(String.format(
           "ALTER TABLE TestDb.Foo %s COLUMNS (i int comment 'hi')", addReplace));
+      // Kudu column options
+      ParsesOk(String.format("ALTER TABLE Foo %s COLUMNS (i int PRIMARY KEY NOT NULL " +
+          "ENCODING RLE COMPRESSION SNAPPY BLOCK_SIZE 1024 DEFAULT 10, " +
+          "j string NULL ENCODING PLAIN_ENCODING COMPRESSION LZ4 BLOCK_SIZE 10 " +
+          "DEFAULT 'test')", addReplace));
       // Negative syntax tests
       ParserError(String.format("ALTER TABLE TestDb.Foo %s COLUMNS i int", addReplace));
@@ -2033,6 +2038,19 @@ public class ParserTest extends FrontendTestBase {
ParserError("ALTER TABLE ADD PARTITION (i=1)");
ParserError("ALTER TABLE ADD");
ParserError("ALTER TABLE DROP");
+
+    // Kudu range partitions
+    String[] ifNotExistsOption = {"IF NOT EXISTS", ""};
+    for (String option: ifNotExistsOption) {
+      ParsesOk(String.format("ALTER TABLE Foo ADD %s RANGE PARTITION 10 < VALUES < 20",
+          option));
+      ParsesOk(String.format("ALTER TABLE Foo ADD %s RANGE PARTITION VALUE = 100",
+          option));
+      ParserError(String.format("ALTER TABLE Foo ADD %s RANGE PARTITION 10 < VALUES " +
+          "<= 20, PARTITION 20 < VALUES <= 30", option));
+      ParserError(String.format("ALTER TABLE Foo ADD %s (RANGE PARTITION 10 < VALUES " +
+          "<= 20)", option));
+    }
}
@Test
@@ -2078,6 +2096,21 @@ public class ParserTest extends FrontendTestBase {
ParserError(String.format("ALTER Foo DROP PARTITION (i=1) %s", kw));
ParserError(String.format("ALTER TABLE DROP PARTITION (i=1) %s", kw));
}
+
+    // Kudu range partitions
+    String[] ifExistsOption = {"IF EXISTS", ""};
+    for (String option: ifExistsOption) {
+      ParsesOk(String.format("ALTER TABLE Foo DROP %s RANGE PARTITION 10 < VALUES < 20",
+          option));
+      ParsesOk(String.format("ALTER TABLE Foo DROP %s RANGE PARTITION VALUE = 100",
+          option));
+      ParserError(String.format("ALTER TABLE Foo DROP %s RANGE PARTITION 10 < VALUES " +
+          "<= 20, PARTITION 20 < VALUES <= 30", option));
+      ParserError(String.format("ALTER TABLE Foo DROP %s (RANGE PARTITION 10 < VALUES " +
+          "<= 20)", option));
+      ParserError(String.format("ALTER TABLE Foo DROP %s RANGE PARTITION VALUE = 100 " +
+          "PURGE", option));
+    }
}
@Test
@@ -2087,6 +2120,9 @@ public class ParserTest extends FrontendTestBase {
     for (String kw: columnKw) {
       ParsesOk(String.format("ALTER TABLE Foo.Bar CHANGE %s c1 c2 int", kw));
       ParsesOk(String.format("ALTER TABLE Foo CHANGE %s c1 c2 int comment 'hi'", kw));
+      // Kudu column options
+      ParsesOk(String.format("ALTER TABLE Foo CHANGE %s c1 c2 int comment 'hi' " +
+          "NULL ENCODING PLAIN_ENCODING COMPRESSION LZ4 DEFAULT 10 BLOCK_SIZE 1024", kw));
       // Negative syntax tests
       ParserError(String.format("ALTER TABLE Foo CHANGE %s c1 int c2", kw));
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/9f497ba0/testdata/workloads/functional-query/queries/QueryTest/kudu_alter.test
----------------------------------------------------------------------
diff --git a/testdata/workloads/functional-query/queries/QueryTest/kudu_alter.test b/testdata/workloads/functional-query/queries/QueryTest/kudu_alter.test
index 4572a5f..505e9fe 100644
--- a/testdata/workloads/functional-query/queries/QueryTest/kudu_alter.test
+++ b/testdata/workloads/functional-query/queries/QueryTest/kudu_alter.test
@@ -5,7 +5,7 @@ create table simple (id int primary key, name string, valf float, vali bigint)
---- RESULTS
====
---- QUERY
--- Alter master address to a different location
+# Alter master address to a different location
alter table simple set tblproperties (
'kudu.master_addresses' = 'localhost'
)
@@ -15,7 +15,7 @@ alter table simple set tblproperties (
STRING
====
---- QUERY
--- Show that new address is picked up
+# Show that new address is picked up
describe formatted simple
---- RESULTS: VERIFY_IS_SUBSET
'','kudu.master_addresses','localhost '
@@ -30,7 +30,7 @@ alter table simple set tblproperties ('kudu.master_addresses' = '127.0.0.1')
STRING
====
---- QUERY
--- Try to use an invalid master address
+# Try to use an invalid master address
alter table simple set tblproperties ('kudu.master_addresses' = 'invalid_host')
---- CATCH
ImpalaRuntimeException: Kudu table 'impala::$DATABASE.simple' does not exist on master 'invalid_host'
@@ -46,3 +46,314 @@ select count(*) from simple_new;
---- TYPES
BIGINT
====
+---- QUERY
+# Create a table with range distribution
+create table tbl_to_alter (id int primary key, name string null, vali bigint not null)
+ distribute by range (id) (partition 1 < values <= 10) stored as kudu
+ tblproperties('kudu.table_name'='tbl_to_alter')
+---- RESULTS
+====
+---- QUERY
+# Add a range partition
+alter table tbl_to_alter add range partition 10 < values <= 20
+---- RESULTS
+====
+---- QUERY
+# Insert a row to the new partition
+insert into tbl_to_alter values (15, 'name', 100)
+---- RUNTIME_PROFILE
+NumModifiedRows: 1
+NumRowErrors: 0
+---- LABELS
+ID, NAME, VALI
+---- DML_RESULTS: tbl_to_alter
+15,'name',100
+---- TYPES
+INT,STRING,BIGINT
+====
+---- QUERY
+# Add a singleton range partition
+alter table tbl_to_alter add range partition value = 100
+---- RESULTS
+====
+---- QUERY
+# Insert a row to the new partition
+insert into tbl_to_alter values (100, 'name1', 1000)
+---- RUNTIME_PROFILE
+NumModifiedRows: 1
+NumRowErrors: 0
+---- LABELS
+ID, NAME, VALI
+---- DML_RESULTS: tbl_to_alter
+100,'name1',1000
+15,'name',100
+---- TYPES
+INT,STRING,BIGINT
+====
+---- QUERY
+# Add an unbounded range partition
+alter table tbl_to_alter add range partition 1000 < values
+---- RESULTS
+====
+---- QUERY
+# Try to add a partition that overlaps with an existing partition
+alter table tbl_to_alter add range partition 10 < values <= 30
+---- CATCH
+NonRecoverableException: New range partition conflicts with existing range partition: [(int32 id=11), (int32 id=31))
+====
+---- QUERY
+# Try to add a partition that overlaps with an existing partition, use IF NOT EXISTS
+# to hide the error
+alter table tbl_to_alter add if not exists range partition 10 < values <= 30
+---- RESULTS
+====
+---- QUERY
+# Drop one of the recently inserted partitions
+alter table tbl_to_alter drop range partition value = 100
+---- RESULTS
+====
+---- QUERY
+# Select table rows after one partition was dropped
+select * from tbl_to_alter
+---- RESULTS
+15,'name',100
+---- TYPES
+INT,STRING,BIGINT
+====
+---- QUERY
+# Drop an existing range partition
+alter table tbl_to_alter drop range partition 10 < values <= 20
+---- RESULTS
+====
+---- QUERY
+# Drop all the range partitions
+alter table tbl_to_alter drop range partition 1 < values <= 10;
+alter table tbl_to_alter drop range partition 1000 < values
+---- RESULTS
+====
+---- QUERY
+# Retrieve the rows of a table after all the partitions got dropped
+select count(*), count(id) from tbl_to_alter
+ where id = 1 and cast(sin(id) as boolean) = true
+---- RESULTS
+0,0
+---- TYPES
+BIGINT,BIGINT
+====
+---- QUERY
+# Insert into a table that has no partitions
+insert into tbl_to_alter values (1, 'name', 100)
+---- RUNTIME_PROFILE
+NumModifiedRows: 0
+NumRowErrors: 1
+====
+---- QUERY
+# Add non-nullable columns
+alter table tbl_to_alter add range partition 1 < values <= 20;
+alter table tbl_to_alter add columns (new_col1 int not null default 10,
+ new_col2 bigint not null default 1000)
+---- RESULTS
+====
+---- QUERY
+# Insert a row that has values for the new columns
+insert into tbl_to_alter values (2, 'test', 100, 1, 100)
+---- RUNTIME_PROFILE
+NumModifiedRows: 1
+NumRowErrors: 0
+---- LABELS
+ID, NAME, VALI, NEW_COL1, NEW_COL2
+---- DML_RESULTS: tbl_to_alter
+2,'test',100,1,100
+---- TYPES
+INT,STRING,BIGINT,INT,BIGINT
+====
+---- QUERY
+# Insert a row that doesn't have values for the new columns; defaults are used
+insert into tbl_to_alter (id,name,vali) values (3, 'test', 200)
+---- RUNTIME_PROFILE
+NumModifiedRows: 1
+NumRowErrors: 0
+---- LABELS
+ID, NAME, VALI, NEW_COL1, NEW_COL2
+---- DML_RESULTS: tbl_to_alter
+2,'test',100,1,100
+3,'test',200,10,1000
+---- TYPES
+INT,STRING,BIGINT,INT,BIGINT
+====
+---- QUERY
+# Insert a row that has nulls on non-nullable columns with default values
+insert into tbl_to_alter values (9, 'test', 300, null, null)
+---- RUNTIME_PROFILE
+NumModifiedRows: 0
+NumRowErrors: 1
+---- LABELS
+ID, NAME, VALI, NEW_COL1, NEW_COL2
+---- DML_RESULTS: tbl_to_alter
+2,'test',100,1,100
+3,'test',200,10,1000
+---- TYPES
+INT,STRING,BIGINT,INT,BIGINT
+====
+---- QUERY
+# Add a nullable column
+alter table tbl_to_alter add columns (new_col3 string null)
+---- RESULTS
+====
+---- QUERY
+# Add two rows
+insert into tbl_to_alter values ((4, 'test', 300, 1, 100, null),
+ (5, 'test', 400, 2, 200, 'names'))
+---- RUNTIME_PROFILE
+NumModifiedRows: 2
+NumRowErrors: 0
+---- LABELS
+ID, NAME, VALI, NEW_COL1, NEW_COL2, NEW_COL3
+---- DML_RESULTS: tbl_to_alter
+2,'test',100,1,100,'NULL'
+3,'test',200,10,1000,'NULL'
+4,'test',300,1,100,'NULL'
+5,'test',400,2,200,'names'
+---- TYPES
+INT,STRING,BIGINT,INT,BIGINT,STRING
+====
+---- QUERY
+# Add a row that doesn't have a value for the last added column
+insert into tbl_to_alter (id, name, vali, new_col1, new_col2)
+ values (6, 'test', 500, 3, 300)
+---- RUNTIME_PROFILE
+NumModifiedRows: 1
+NumRowErrors: 0
+---- LABELS
+ID, NAME, VALI, NEW_COL1, NEW_COL2, NEW_COL3
+---- DML_RESULTS: tbl_to_alter
+2,'test',100,1,100,'NULL'
+3,'test',200,10,1000,'NULL'
+4,'test',300,1,100,'NULL'
+5,'test',400,2,200,'names'
+6,'test',500,3,300,'NULL'
+---- TYPES
+INT,STRING,BIGINT,INT,BIGINT,STRING
+====
+---- QUERY
+# Add a nullable column with a default value
+alter table tbl_to_alter add columns (invalid_col int null default 10)
+---- CATCH
+Error adding nullable column to Kudu table
+====
+---- QUERY
+# Add a non-nullable column without a default value
+alter table tbl_to_alter add columns (invalid_col int not null)
+---- CATCH
+Error adding non-nullable column to Kudu table
+====
+---- QUERY
+# Drop a column
+alter table tbl_to_alter drop column vali
+---- RESULTS
+====
+---- QUERY
+# Retrieve table rows after column got dropped
+select * from tbl_to_alter
+---- RESULTS
+2,'test',1,100,'NULL'
+3,'test',10,1000,'NULL'
+4,'test',1,100,'NULL'
+5,'test',2,200,'names'
+6,'test',3,300,'NULL'
+---- TYPES
+INT,STRING,INT,BIGINT,STRING
+====
+---- QUERY
+# Try to drop a primary key column
+alter table tbl_to_alter drop column id
+---- CATCH
+NonRecoverableException: cannot remove a key column
+====
+---- QUERY
+# Rename a column
+alter table tbl_to_alter change column new_col3 last_name string
+---- RESULTS
+====
+---- QUERY
+# Ensure the renamed column is accessible
+select id, last_name from tbl_to_alter
+---- RESULTS
+2,'NULL'
+3,'NULL'
+4,'NULL'
+5,'names'
+6,'NULL'
+---- TYPES
+INT,STRING
+====
+---- QUERY
+# Rename the underlying Kudu table
+alter table tbl_to_alter set tblproperties('kudu.table_name'='kudu_tbl_to_alter')
+---- RESULTS
+'Updated table.'
+====
+---- QUERY
+# Create a new table and try to rename to an existing kudu table
+create table copy_of_tbl (a int primary key) distribute by hash (a) into 3 buckets
+  stored as kudu;
+alter table copy_of_tbl set tblproperties('kudu.table_name'='kudu_tbl_to_alter')
+---- CATCH
+ImpalaRuntimeException: Error renaming Kudu table copy_of_tbl to kudu_tbl_to_alter
+====
+---- QUERY
+# Ensure the Kudu table is accessible
+select count(*) from tbl_to_alter
+---- RESULTS
+5
+---- TYPES
+BIGINT
+====
+---- QUERY
+# Rename the Impala table
+alter table tbl_to_alter rename to kudu_tbl_to_alter
+---- RESULTS
+====
+---- QUERY
+# Ensure the Impala table is accessible after it got renamed
+select count(*) from kudu_tbl_to_alter
+---- RESULTS
+5
+---- TYPES
+BIGINT
+====
+---- QUERY
+# Rename Kudu table and insert a number of rows
+alter table copy_of_tbl set tblproperties('kudu.table_name'='shared_kudu_tbl');
+insert into copy_of_tbl values (1), (2), (3)
+---- RUNTIME_PROFILE
+NumModifiedRows: 3
+NumRowErrors: 0
+---- LABELS
+A
+---- DML_RESULTS: copy_of_tbl
+1
+2
+3
+---- TYPES
+INT
+====
+---- QUERY
+# Create an external table
+create external table external_tbl stored as kudu
+tblproperties('kudu.table_name'='kudu_tbl_to_alter');
+select count(*) from external_tbl
+---- RESULTS
+5
+---- TYPES
+BIGINT
+====
+---- QUERY
+# Change the external table to point to a different Kudu table
+alter table external_tbl set tblproperties('kudu.table_name'='shared_kudu_tbl');
+select count(*) from external_tbl
+---- RESULTS
+3
+---- TYPES
+BIGINT
+====
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/9f497ba0/tests/query_test/test_kudu.py
----------------------------------------------------------------------
diff --git a/tests/query_test/test_kudu.py b/tests/query_test/test_kudu.py
index 4ea8770..3401ae2 100644
--- a/tests/query_test/test_kudu.py
+++ b/tests/query_test/test_kudu.py
@@ -68,6 +68,7 @@ class TestKuduOperations(KuduTestSuite):
     self.run_test_case('QueryTest/kudu_describe', vector, use_db=unique_database)
def test_kudu_column_options(self, cursor, kudu_client, unique_database):
+ """Test Kudu column options"""
encodings = ["ENCODING PLAIN_ENCODING", ""]
compressions = ["COMPRESSION SNAPPY", ""]
nullability = ["NOT NULL", "NULL", ""]
@@ -89,6 +90,18 @@ class TestKuduOperations(KuduTestSuite):
kudu_tbl_name = "impala::%s.%s" % (unique_database,
impala_tbl_name)
assert kudu_client.table_exists(kudu_tbl_name)
+  def test_kudu_rename_table(self, cursor, kudu_client, unique_database):
+    """Test Kudu table rename"""
+    cursor.execute("""CREATE TABLE %s.foo (a INT PRIMARY KEY) DISTRIBUTE BY HASH(a)
+        INTO 3 BUCKETS STORED AS KUDU""" % unique_database)
+    kudu_tbl_name = "impala::%s.foo" % unique_database
+    assert kudu_client.table_exists(kudu_tbl_name)
+    new_kudu_tbl_name = "blah"
+    cursor.execute("ALTER TABLE %s.foo SET TBLPROPERTIES('kudu.table_name'='%s')" % (
+        unique_database, new_kudu_tbl_name))
+    assert kudu_client.table_exists(new_kudu_tbl_name)
+    assert not kudu_client.table_exists(kudu_tbl_name)
+
class TestCreateExternalTable(KuduTestSuite):
def test_implicit_table_props(self, cursor, kudu_client):
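
The new pytest case above relies on the suite's cursor and kudu_client fixtures; a fixture-free sketch of the same rename check, runnable directly against a dev cluster, might look like the following. The endpoints and the scratch_db/foo/foo_renamed names are assumptions; only the SQL forms come from this patch.

  from impala.dbapi import connect
  import kudu

  # Assumed local Impala coordinator and Kudu master.
  cursor = connect(host='localhost', port=21050).cursor()
  kudu_client = kudu.connect(host='localhost', port=7051)

  cursor.execute("CREATE TABLE scratch_db.foo (a INT PRIMARY KEY) "
                 "DISTRIBUTE BY HASH(a) INTO 3 BUCKETS STORED AS KUDU")
  assert kudu_client.table_exists("impala::scratch_db.foo")

  # Setting kudu.table_name renames the underlying Kudu table in place.
  cursor.execute("ALTER TABLE scratch_db.foo "
                 "SET TBLPROPERTIES('kudu.table_name'='foo_renamed')")
  assert kudu_client.table_exists("foo_renamed")
  assert not kudu_client.table_exists("impala::scratch_db.foo")
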