Repository: impala
Updated Branches:
  refs/heads/master 3fc42ded0 -> f8b406222
IMPALA-4323: "SET ROW FORMAT" option added to "ALTER TABLE" command

Examples of new command:
ALTER TABLE t1 SET ROW FORMAT DELIMITED FIELDS TERMINATED BY '\002';
ALTER TABLE t1 SET ROW FORMAT DELIMITED LINES TERMINATED BY '\001';

Testing:
Added parser tests and unit tests for alter statements including
partition options.

Change-Id: I96e347463504915a6f33932552e4d1f61e9b1154
Reviewed-on: http://gerrit.cloudera.org:8080/8928
Reviewed-by: Alex Behm <[email protected]>
Tested-by: Impala Public Jenkins


Project: http://git-wip-us.apache.org/repos/asf/impala/repo
Commit: http://git-wip-us.apache.org/repos/asf/impala/commit/4c43cace
Tree: http://git-wip-us.apache.org/repos/asf/impala/tree/4c43cace
Diff: http://git-wip-us.apache.org/repos/asf/impala/diff/4c43cace

Branch: refs/heads/master
Commit: 4c43cace877c9d7455eff8329ed3b285f51bf84d
Parents: 3fc42de
Author: Adam Holley <[email protected]>
Authored: Tue Jan 2 13:55:40 2018 -0600
Committer: Impala Public Jenkins <[email protected]>
Committed: Tue Jan 16 23:58:24 2018 +0000

----------------------------------------------------------------------
 common/thrift/JniCatalog.thrift                 | 13 +++
 fe/src/main/cup/sql-parser.cup                  | 35 +++++---
 .../analysis/AlterTableSetRowFormatStmt.java    | 90 +++++++++++++++++++
 .../catalog/HiveStorageDescriptorFactory.java   | 21 +++--
 .../impala/service/CatalogOpExecutor.java       | 60 ++++++++++++-
 .../apache/impala/analysis/AnalyzeDDLTest.java  | 28 ++++++
 .../org/apache/impala/analysis/ParserTest.java  | 20 +++++
 .../queries/QueryTest/alter-table.test          | 92 ++++++++++++++++++++
 8 files changed, 339 insertions(+), 20 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/impala/blob/4c43cace/common/thrift/JniCatalog.thrift
----------------------------------------------------------------------
diff --git a/common/thrift/JniCatalog.thrift b/common/thrift/JniCatalog.thrift
index 6e746c7..939e276 100644
--- a/common/thrift/JniCatalog.thrift
+++ b/common/thrift/JniCatalog.thrift
@@ -71,6 +71,7 @@ enum TAlterTableType {
   UPDATE_STATS,
   SET_CACHED,
   RECOVER_PARTITIONS,
+  SET_ROW_FORMAT,
 }
 
 // Parameters of CREATE DATABASE commands
@@ -263,6 +264,15 @@ struct TAlterTableSetFileFormatParams {
   2: optional list<list<CatalogObjects.TPartitionKeyValue>> partition_set
 }
 
+// Parameters for ALTER TABLE SET [PARTITION partitionSet] ROW FORMAT commands.
+struct TAlterTableSetRowFormatParams {
+  // New row format.
+  1: required TTableRowFormat row_format
+
+  // An optional partition set, set if modifying the row format of the partitions.
+  2: optional list<list<CatalogObjects.TPartitionKeyValue>> partition_set
+}
+
 // Parameters for ALTER TABLE SET [PARTITION partitionSpec] location commands.
 struct TAlterTableSetLocationParams {
   // New HDFS storage location of the table.
@@ -349,6 +359,9 @@ struct TAlterTableParams { // Parameters for ALTER TABLE ADD/ADD RANGE PARTITION 14: optional TAlterTableAddDropRangePartitionParams add_drop_range_partition_params + + // Parameters for ALTER TABLE SET ROW FORMAT + 15: optional TAlterTableSetRowFormatParams set_row_format_params } // Parameters of CREATE TABLE LIKE commands http://git-wip-us.apache.org/repos/asf/impala/blob/4c43cace/fe/src/main/cup/sql-parser.cup ---------------------------------------------------------------------- diff --git a/fe/src/main/cup/sql-parser.cup b/fe/src/main/cup/sql-parser.cup index c2ed3f6..668bb88 100644 --- a/fe/src/main/cup/sql-parser.cup +++ b/fe/src/main/cup/sql-parser.cup @@ -431,7 +431,7 @@ nonterminal ArrayList<ColumnDef> column_def_list, partition_column_defs, view_column_def_list, view_column_defs; nonterminal ArrayList<StructField> struct_field_def_list; // Options for DDL commands - CREATE/DROP/ALTER -nonterminal HdfsCachingOp cache_op_val; +nonterminal HdfsCachingOp cache_op_val, opt_cache_op_val; nonterminal BigDecimal opt_cache_op_replication; nonterminal String comment_val, opt_comment_val; nonterminal Boolean external_val; @@ -444,7 +444,7 @@ nonterminal Boolean if_not_exists_val; nonterminal Boolean is_primary_key_val; nonterminal Boolean replace_existing_cols_val; nonterminal HdfsUri location_val; -nonterminal RowFormat row_format_val; +nonterminal RowFormat row_format_val, opt_row_format_val; nonterminal String field_terminator_val; nonterminal String line_terminator_val; nonterminal String escaped_by_val; @@ -975,7 +975,7 @@ opt_kw_role ::= ; partition_def ::= - partition_spec:partition location_val:location cache_op_val:cache_op + partition_spec:partition location_val:location opt_cache_op_val:cache_op {: RESULT = new PartitionDef(partition, location, cache_op); :} ; @@ -1047,11 +1047,14 @@ alter_tbl_stmt ::= if (partition != null) parser.parseError("set", SqlParserSymbols.KW_SET); RESULT = new AlterTableSetColumnStats(table, col, map); :} + | KW_ALTER KW_TABLE table_name:table opt_partition_set:partition KW_SET + row_format_val:row_format + {: + RESULT = new AlterTableSetRowFormatStmt(table, partition, row_format); + :} | KW_ALTER KW_TABLE table_name:table opt_partition_set:partitions KW_SET cache_op_val:cache_op {: - // Ensure a parser error is thrown for ALTER statements if no cache op is specified. 
- if (cache_op == null) parser.parseError("set", SqlParserSymbols.KW_SET); RESULT = new AlterTableSetCachedStmt(table, partitions, cache_op); :} | KW_ALTER KW_TABLE table_name:table KW_RECOVER KW_PARTITIONS @@ -1236,9 +1239,9 @@ primary_keys ::= ; tbl_options ::= - opt_sort_cols:sort_cols opt_comment_val:comment row_format_val:row_format + opt_sort_cols:sort_cols opt_comment_val:comment opt_row_format_val:row_format serde_properties:serde_props file_format_create_table_val:file_format - location_val:location cache_op_val:cache_op + location_val:location opt_cache_op_val:cache_op tbl_properties:tbl_props {: CreateTableStmt.unescapeProperties(serde_props); @@ -1403,13 +1406,18 @@ create_uda_stmt ::= :} ; +opt_cache_op_val ::= + cache_op_val:cache_op + {: RESULT = cache_op; :} + | /* empty */ + {: RESULT = null; :} + ; + cache_op_val ::= KW_CACHED KW_IN STRING_LITERAL:pool_name opt_cache_op_replication:replication {: RESULT = new HdfsCachingOp(pool_name, replication); :} | KW_UNCACHED {: RESULT = new HdfsCachingOp(); :} - | /* empty */ - {: RESULT = null; :} ; opt_cache_op_replication ::= @@ -1466,12 +1474,17 @@ if_not_exists_val ::= {: RESULT = false; :} ; +opt_row_format_val ::= + row_format_val:row_format + {: RESULT = row_format; :} + |/* empty */ + {: RESULT = null; :} + ; + row_format_val ::= KW_ROW KW_FORMAT KW_DELIMITED field_terminator_val:field_terminator escaped_by_val:escaped_by line_terminator_val:line_terminator {: RESULT = new RowFormat(field_terminator, line_terminator, escaped_by); :} - |/* empty */ - {: RESULT = null; :} ; escaped_by_val ::= http://git-wip-us.apache.org/repos/asf/impala/blob/4c43cace/fe/src/main/java/org/apache/impala/analysis/AlterTableSetRowFormatStmt.java ---------------------------------------------------------------------- diff --git a/fe/src/main/java/org/apache/impala/analysis/AlterTableSetRowFormatStmt.java b/fe/src/main/java/org/apache/impala/analysis/AlterTableSetRowFormatStmt.java new file mode 100644 index 0000000..52f7a32 --- /dev/null +++ b/fe/src/main/java/org/apache/impala/analysis/AlterTableSetRowFormatStmt.java @@ -0,0 +1,90 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package org.apache.impala.analysis; + +import java.util.Collection; + +import org.apache.impala.catalog.HdfsFileFormat; +import org.apache.impala.catalog.HdfsPartition; +import org.apache.impala.catalog.HdfsTable; +import org.apache.impala.catalog.KuduTable; +import org.apache.impala.catalog.RowFormat; +import org.apache.impala.catalog.Table; +import org.apache.impala.common.AnalysisException; +import org.apache.impala.thrift.TAlterTableParams; +import org.apache.impala.thrift.TAlterTableSetRowFormatParams; +import org.apache.impala.thrift.TAlterTableType; + +/** + * Represents an ALTER TABLE [PARTITION partitionSet] SET ROW FORMAT statement. + */ +public class AlterTableSetRowFormatStmt extends AlterTableSetStmt { + private final RowFormat rowFormat_; + + public AlterTableSetRowFormatStmt(TableName tableName, + PartitionSet partitionSet, RowFormat rowFormat) { + super(tableName, partitionSet); + rowFormat_ = rowFormat; + } + + public RowFormat getRowFormat() { return rowFormat_; } + + @Override + public TAlterTableParams toThrift() { + TAlterTableParams params = super.toThrift(); + params.setAlter_type(TAlterTableType.SET_ROW_FORMAT); + TAlterTableSetRowFormatParams rowFormatParams = + new TAlterTableSetRowFormatParams(getRowFormat().toThrift()); + if (getPartitionSet() != null) { + rowFormatParams.setPartition_set(getPartitionSet().toThrift()); + } + params.setSet_row_format_params(rowFormatParams); + return params; + } + + @Override + public void analyze(Analyzer analyzer) throws AnalysisException { + super.analyze(analyzer); + Table tbl = getTargetTable(); + if (!(tbl instanceof HdfsTable)) { + throw new AnalysisException(String.format("ALTER TABLE SET ROW FORMAT is only " + + "supported on HDFS tables. Conflicting table: %1$s", tbl.getFullName())); + } + if (partitionSet_ != null) { + for (HdfsPartition partition: partitionSet_.getPartitions()) { + if (partition.getFileFormat() != HdfsFileFormat.TEXT && + partition.getFileFormat() != HdfsFileFormat.SEQUENCE_FILE) { + throw new AnalysisException(String.format("ALTER TABLE SET ROW FORMAT is " + + "only supported on TEXT or SEQUENCE file formats. " + + "Conflicting partition/format: %1$s / %2$s", partition.getPartitionName(), + HdfsFileFormat.fromHdfsInputFormatClass( + partition.getFileFormat().inputFormat()).name())); + } + } + } else { + HdfsFileFormat format = HdfsFileFormat.fromHdfsInputFormatClass( + ((HdfsTable) tbl).getMetaStoreTable().getSd().getInputFormat()); + if (format != HdfsFileFormat.TEXT && + format != HdfsFileFormat.SEQUENCE_FILE) { + throw new AnalysisException(String.format("ALTER TABLE SET ROW FORMAT is " + + "only supported on TEXT or SEQUENCE file formats. 
Conflicting " + + "table/format: %1$s / %2$s", tbl.getFullName(), format.name())); + } + } + } +} http://git-wip-us.apache.org/repos/asf/impala/blob/4c43cace/fe/src/main/java/org/apache/impala/catalog/HiveStorageDescriptorFactory.java ---------------------------------------------------------------------- diff --git a/fe/src/main/java/org/apache/impala/catalog/HiveStorageDescriptorFactory.java b/fe/src/main/java/org/apache/impala/catalog/HiveStorageDescriptorFactory.java index 342e8d7..b1bd003 100644 --- a/fe/src/main/java/org/apache/impala/catalog/HiveStorageDescriptorFactory.java +++ b/fe/src/main/java/org/apache/impala/catalog/HiveStorageDescriptorFactory.java @@ -19,9 +19,10 @@ package org.apache.impala.catalog; import java.util.HashMap; +import org.apache.hadoop.hive.metastore.api.SerDeInfo; import org.apache.hadoop.hive.metastore.api.StorageDescriptor; - import org.apache.impala.thrift.THdfsFileFormat; + import com.google.common.base.Preconditions; public class HiveStorageDescriptorFactory { @@ -46,18 +47,26 @@ public class HiveStorageDescriptorFactory { sd.setInputFormat(hdfsFileFormat.inputFormat()); sd.setOutputFormat(hdfsFileFormat.outputFormat()); sd.getSerdeInfo().setSerializationLib(hdfsFileFormat.serializationLib()); + setSerdeInfo(rowFormat, sd.getSerdeInfo()); + return sd; + } + /** + * Updates the serde info with the specified RowFormat. This method is used when + * just updating the row format and not the entire storage descriptor. + */ + public static void setSerdeInfo(RowFormat rowFormat, SerDeInfo serdeInfo) { if (rowFormat.getFieldDelimiter() != null) { - sd.getSerdeInfo().putToParameters( + serdeInfo.putToParameters( "serialization.format", rowFormat.getFieldDelimiter()); - sd.getSerdeInfo().putToParameters("field.delim", rowFormat.getFieldDelimiter()); + serdeInfo.putToParameters("field.delim", rowFormat.getFieldDelimiter()); } if (rowFormat.getEscapeChar() != null) { - sd.getSerdeInfo().putToParameters("escape.delim", rowFormat.getEscapeChar()); + serdeInfo.putToParameters("escape.delim", rowFormat.getEscapeChar()); } if (rowFormat.getLineDelimiter() != null) { - sd.getSerdeInfo().putToParameters("line.delim", rowFormat.getLineDelimiter()); + serdeInfo.putToParameters("line.delim", rowFormat.getLineDelimiter()); } - return sd; + } } http://git-wip-us.apache.org/repos/asf/impala/blob/4c43cace/fe/src/main/java/org/apache/impala/service/CatalogOpExecutor.java ---------------------------------------------------------------------- diff --git a/fe/src/main/java/org/apache/impala/service/CatalogOpExecutor.java b/fe/src/main/java/org/apache/impala/service/CatalogOpExecutor.java index 295956c..df3b10b 100644 --- a/fe/src/main/java/org/apache/impala/service/CatalogOpExecutor.java +++ b/fe/src/main/java/org/apache/impala/service/CatalogOpExecutor.java @@ -98,6 +98,7 @@ import org.apache.impala.thrift.TAlterTableParams; import org.apache.impala.thrift.TAlterTableSetCachedParams; import org.apache.impala.thrift.TAlterTableSetFileFormatParams; import org.apache.impala.thrift.TAlterTableSetLocationParams; +import org.apache.impala.thrift.TAlterTableSetRowFormatParams; import org.apache.impala.thrift.TAlterTableSetTblPropertiesParams; import org.apache.impala.thrift.TAlterTableType; import org.apache.impala.thrift.TAlterTableUpdateStatsParams; @@ -141,6 +142,7 @@ import org.apache.impala.thrift.TResultSetMetadata; import org.apache.impala.thrift.TStatus; import org.apache.impala.thrift.TTable; import org.apache.impala.thrift.TTableName; +import 
org.apache.impala.thrift.TTableRowFormat;
 import org.apache.impala.thrift.TTableStats;
 import org.apache.impala.thrift.TTruncateParams;
 import org.apache.impala.thrift.TUpdateCatalogRequest;
@@ -461,6 +463,20 @@ public class CatalogOpExecutor {
         }
         setResultSet = true;
         break;
+      case SET_ROW_FORMAT:
+        TAlterTableSetRowFormatParams rowFormatParams =
+            params.getSet_row_format_params();
+        reloadFileMetadata = alterTableSetRowFormat(tbl,
+            rowFormatParams.getPartition_set(), rowFormatParams.getRow_format(),
+            numUpdatedPartitions);
+        if (rowFormatParams.isSetPartition_set()) {
+          resultColVal.setString_val(
+              "Updated " + numUpdatedPartitions.getRef() + " partition(s).");
+        } else {
+          resultColVal.setString_val("Updated table.");
+        }
+        setResultSet = true;
+        break;
       case SET_LOCATION:
         TAlterTableSetLocationParams setLocationParams =
            params.getSet_location_params();
@@ -2212,9 +2228,8 @@ public class CatalogOpExecutor {
 
   /**
    * Changes the file format for the given table or partitions. This is a metadata only
-   * operation, existing table data will not be converted to the new format. After
-   * changing the file format the table metadata is marked as invalid and will be
-   * reloaded on the next access.
+   * operation; existing table data will not be converted to the new format. Returns
+   * true if the file metadata needs to be reloaded.
    */
   private boolean alterTableSetFileFormat(Table tbl,
       List<List<TPartitionKeyValue>> partitionSet, THdfsFileFormat fileFormat,
@@ -2249,6 +2264,45 @@ public class CatalogOpExecutor {
   }
 
   /**
+   * Changes the row format for the given table or partitions. This is a metadata only
+   * operation; existing table data will not be converted to the new format. Returns
+   * true if the file metadata needs to be reloaded.
+   */
+  private boolean alterTableSetRowFormat(Table tbl,
+      List<List<TPartitionKeyValue>> partitionSet, TTableRowFormat tRowFormat,
+      Reference<Long> numUpdatedPartitions)
+      throws ImpalaException {
+    Preconditions.checkState(tbl.getLock().isHeldByCurrentThread());
+    Preconditions.checkState(partitionSet == null || !partitionSet.isEmpty());
+    Preconditions.checkArgument(tbl instanceof HdfsTable);
+    boolean reloadFileMetadata = false;
+    RowFormat rowFormat = RowFormat.fromThrift(tRowFormat);
+    if (partitionSet == null) {
+      org.apache.hadoop.hive.metastore.api.Table msTbl =
+          tbl.getMetaStoreTable().deepCopy();
+      StorageDescriptor sd = msTbl.getSd();
+      HiveStorageDescriptorFactory.setSerdeInfo(rowFormat, sd.getSerdeInfo());
+      // The default partition must be updated if the row format is changed so that new
+      // partitions are created with the new row format.
+      ((HdfsTable) tbl).addDefaultPartition(msTbl.getSd());
+      applyAlterTable(msTbl);
+      reloadFileMetadata = true;
+    } else {
+      List<HdfsPartition> partitions =
+          ((HdfsTable) tbl).getPartitionsFromPartitionSet(partitionSet);
+      List<HdfsPartition> modifiedParts = Lists.newArrayList();
+      for (HdfsPartition partition: partitions) {
+        HiveStorageDescriptorFactory.setSerdeInfo(rowFormat, partition.getSerdeInfo());
+        modifiedParts.add(partition);
+      }
+      TableName tableName = tbl.getTableName();
+      bulkAlterPartitions(tableName.getDb(), tableName.getTbl(), modifiedParts);
+      numUpdatedPartitions.setRef((long) modifiedParts.size());
+    }
+    return reloadFileMetadata;
+  }
+
+  /**
    * Helper method for setting the file format on a given storage descriptor.
*/ private static void setStorageDescriptorFileFormat(StorageDescriptor sd, http://git-wip-us.apache.org/repos/asf/impala/blob/4c43cace/fe/src/test/java/org/apache/impala/analysis/AnalyzeDDLTest.java ---------------------------------------------------------------------- diff --git a/fe/src/test/java/org/apache/impala/analysis/AnalyzeDDLTest.java b/fe/src/test/java/org/apache/impala/analysis/AnalyzeDDLTest.java index 53ee930..72b7f58 100644 --- a/fe/src/test/java/org/apache/impala/analysis/AnalyzeDDLTest.java +++ b/fe/src/test/java/org/apache/impala/analysis/AnalyzeDDLTest.java @@ -465,6 +465,34 @@ public class AnalyzeDDLTest extends FrontendTestBase { } @Test + public void TestAlterTableSetRowFormat() throws AnalysisException { + AnalyzesOk("alter table functional.alltypes set row format delimited " + + "fields terminated by ' '"); + AnalyzesOk("alter table functional.alltypes partition (year=2010) set row format " + + "delimited fields terminated by ' '"); + AnalyzesOk("alter table functional_seq.alltypes set row format delimited " + + "fields terminated by ' '"); + AnalysisError("alter table functional.alltypesnopart PARTITION (month=1) " + + "set row format delimited fields terminated by ' '", + "Table is not partitioned: functional.alltypesnopart"); + String [] unsupportedFileFormatDbs = + {"functional_parquet", "functional_rc", "functional_avro"}; + for (String format: unsupportedFileFormatDbs) { + AnalysisError("alter table " + format + ".alltypes set row format delimited " + + "fields terminated by ' '", "ALTER TABLE SET ROW FORMAT is only supported " + + "on TEXT or SEQUENCE file formats"); + } + AnalysisError("alter table functional_kudu.alltypes set row format delimited " + + "fields terminated by ' '", "ALTER TABLE SET ROW FORMAT is only supported " + + "on HDFS tables"); + AnalysisError("alter table functional.alltypesmixedformat partition(year=2009) " + + "set row format delimited fields terminated by ' '", + "ALTER TABLE SET ROW FORMAT is only supported on TEXT or SEQUENCE file formats"); + AnalyzesOk("alter table functional.alltypesmixedformat partition(year=2009,month=1) " + + "set row format delimited fields terminated by ' '"); + } + + @Test public void TestAlterTableSet() throws AnalysisException { AnalyzesOk("alter table functional.alltypes set fileformat sequencefile"); AnalyzesOk("alter table functional.alltypes set location '/a/b'"); http://git-wip-us.apache.org/repos/asf/impala/blob/4c43cace/fe/src/test/java/org/apache/impala/analysis/ParserTest.java ---------------------------------------------------------------------- diff --git a/fe/src/test/java/org/apache/impala/analysis/ParserTest.java b/fe/src/test/java/org/apache/impala/analysis/ParserTest.java index 59a1e85..40585bf 100644 --- a/fe/src/test/java/org/apache/impala/analysis/ParserTest.java +++ b/fe/src/test/java/org/apache/impala/analysis/ParserTest.java @@ -2313,6 +2313,26 @@ public class ParserTest extends FrontendTestBase { ParserError("ALTER TABLE TestDb.Foo SET LOCATION"); ParserError("ALTER TABLE TestDb.Foo SET"); + ParsesOk("ALTER TABLE Foo SET ROW FORMAT DELIMITED FIELDS TERMINATED BY ','"); + ParsesOk("ALTER TABLE Foo SET ROW FORMAT DELIMITED LINES TERMINATED BY '\n'"); + ParsesOk("ALTER TABLE Foo SET ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' " + + "LINES TERMINATED BY '\n'"); + ParsesOk("ALTER TABLE Foo SET ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' " + + "ESCAPED BY '\' LINES TERMINATED BY '\n'"); + ParsesOk("ALTER TABLE Foo PARTITION (i=1) SET ROW FORMAT DELIMITED " + + "FIELDS TERMINATED 
BY ','"); + ParsesOk("ALTER TABLE Foo PARTITION (i=1) SET ROW FORMAT DELIMITED " + + "LINES TERMINATED BY '\n'"); + ParsesOk("ALTER TABLE Foo PARTITION (i=1) SET ROW FORMAT DELIMITED " + + "FIELDS TERMINATED BY ',' LINES TERMINATED BY '\n'"); + ParsesOk("ALTER TABLE Foo PARTITION (i=1) SET ROW FORMAT DELIMITED " + + "FIELDS TERMINATED BY ',' ESCAPED BY '\' LINES TERMINATED BY '\n'"); + ParserError("ALTER TABLE Foo SET ROW FORMAT"); + ParserError("ALTER TABLE Foo SET ROW FORMAT DELIMITED FIELDS"); + ParserError("ALTER TABLE Foo PARTITION () SET ROW FORMAT DELIMITED " + + "FIELDS TERMINATED BY ','"); + ParserError("ALTER TABLE Foo PARTITION (i=1) SET ROW FORMAT"); + String[] tblPropTypes = {"TBLPROPERTIES", "SERDEPROPERTIES"}; String[] partClauses = {"", "PARTITION(k1=10, k2=20)"}; for (String propType: tblPropTypes) { http://git-wip-us.apache.org/repos/asf/impala/blob/4c43cace/testdata/workloads/functional-query/queries/QueryTest/alter-table.test ---------------------------------------------------------------------- diff --git a/testdata/workloads/functional-query/queries/QueryTest/alter-table.test b/testdata/workloads/functional-query/queries/QueryTest/alter-table.test index e2bcf4e..555a599 100644 --- a/testdata/workloads/functional-query/queries/QueryTest/alter-table.test +++ b/testdata/workloads/functional-query/queries/QueryTest/alter-table.test @@ -1334,3 +1334,95 @@ select count(*) from insert_sorted_partitioned; ---- RESULTS 6 ==== +---- QUERY +# IMPALA-4323: Test alter row format statement. Ensure alter statement updates metadata. +# Spaces after text will become field delimiters after the alter statement. +# Tildes at the end of the line will become line delimiters after the second alter statement. +create table del_table (c1 string, c2 string, c3 string) +row format delimited fields terminated by '\002' lines terminated by '\001' stored as textfile; +insert into del_table values ("the ", "quick ", "brown~"), +("fox ","jumped ","over~"); +select * from del_table order by c3; +---- RESULTS +'the ','quick ','brown~' +'fox ','jumped ','over~' +---- TYPES +STRING,STRING,STRING +==== +---- QUERY +# Test select after alter to ensure field delimiters change to spaces and +# the line delimiters remain '\001'. +alter table del_table set row format delimited fields terminated by ' '; +select * from del_table order by c3; +---- RESULTS +'the','\x02quick','\x02brown~' +'fox','\x02jumped','\x02over~' +---- TYPES +STRING,STRING,STRING +==== +---- QUERY +# Test select after alter to ensure line delimiters change. +# We end up with an extra record because the new delimiter is +# before the original delimiter which now becomes data. +alter table del_table set row format delimited fields terminated by '\002' +lines terminated by '~'; +select * from del_table order by c3; +---- RESULTS +'\x01','NULL','' +'the ','quick ','brown' +'\x01fox ','jumped ','over' +---- TYPES +STRING,STRING,STRING +==== +---- QUERY +# IMPALA-4323: Test alter row format statement with partitions. +# Ensure alter statement updates metadata. 
+create table del_table_part (c1 string, c2 string, c3 string) partitioned by (c0 int) +row format delimited fields terminated by '\002' lines terminated by '\001' stored as textfile; +insert into del_table_part partition (c0=0) values +("the ", "quick ", "brown"); +insert into del_table_part partition (c0=1) values +("fox ","jumped ","over"); +select * from del_table_part order by c0; +---- RESULTS +'the ','quick ','brown',0 +'fox ','jumped ','over',1 +---- TYPES +STRING,STRING,STRING,INT +==== +---- QUERY +# Test select after alter to ensure only one partition changes. +alter table del_table_part partition (c0=1) set row format delimited fields terminated by ' '; +select * from del_table_part order by c0; +---- RESULTS +'the ','quick ','brown',0 +'fox','\x02jumped','\x02over',1 +---- TYPES +STRING,STRING,STRING,INT +==== +---- QUERY +# Test select after alter table to ensure no partition changes. +alter table del_table_part set row format delimited fields terminated by '_'; +select * from del_table_part order by c0; +---- RESULTS +'the ','quick ','brown',0 +'fox','\x02jumped','\x02over',1 +---- TYPES +STRING,STRING,STRING,INT +==== +---- QUERY +# Ensure new partitions use table-level format +# First we set a terminator, insert some data in a new partition that should use it +# then change the terminator so the original will show in the results. +alter table del_table_part set row format delimited fields terminated by '_'; +insert into del_table_part partition (c0=2) values +('the','\002lazy','\002dog'); +alter table del_table_part partition (c0=2) set row format delimited fields terminated by '\002'; +select * from del_table_part order by c0; +---- RESULTS +'the ','quick ','brown',0 +'fox','\x02jumped','\x02over',1 +'the_','lazy_','dog',2 +---- TYPES +STRING,STRING,STRING,INT +====
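For reference, the new statements can be exercised end to end from any HiveServer2-compatible client. The sketch below is not part of this change: the JDBC URL, the table t1, and the partition key year are placeholders, and it assumes the Hive JDBC driver is on the classpath with an unsecured impalad listening on its HiveServer2 port.

// Minimal usage sketch (assumed: host/port/auth, table t1 with a 'year' partition).
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class AlterRowFormatExample {
  public static void main(String[] args) throws Exception {
    // Placeholder connection string; adjust host/port/auth for the cluster.
    String url = "jdbc:hive2://impalad-host:21050/default;auth=noSasl";
    try (Connection conn = DriverManager.getConnection(url);
         Statement stmt = conn.createStatement()) {
      // Table-level change: only the serde metadata is rewritten, data files stay as-is.
      stmt.execute("ALTER TABLE t1 SET ROW FORMAT DELIMITED "
          + "FIELDS TERMINATED BY '\\002'");
      // Partition-level change, matching the opt_partition_set grammar above.
      stmt.execute("ALTER TABLE t1 PARTITION (year=2018) SET ROW FORMAT DELIMITED "
          + "LINES TERMINATED BY '\\001'");
      // Subsequent scans interpret the existing files with the new delimiters.
      try (ResultSet rs = stmt.executeQuery("SELECT * FROM t1 LIMIT 5")) {
        while (rs.next()) System.out.println(rs.getString(1));
      }
    }
  }
}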

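Likewise, what alterTableSetRowFormat()/setSerdeInfo() persist can be checked by reading the serde parameters back from the Hive Metastore: the patch maps FIELDS TERMINATED BY onto field.delim/serialization.format, LINES TERMINATED BY onto line.delim, and ESCAPED BY onto escape.delim. The snippet below is only a sketch under assumed settings (metastore URI, database, and table name are placeholders), not part of the change.

import java.util.Map;

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.Table;

public class RowFormatMetastoreCheck {
  public static void main(String[] args) throws Exception {
    HiveConf conf = new HiveConf();
    // Placeholder metastore URI.
    conf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://metastore-host:9083");
    HiveMetaStoreClient client = new HiveMetaStoreClient(conf);
    try {
      Table tbl = client.getTable("default", "t1");
      Map<String, String> serde = tbl.getSd().getSerdeInfo().getParameters();
      // After the ALTER, these reflect the new ROW FORMAT clause.
      System.out.println("field.delim  = " + serde.get("field.delim"));
      System.out.println("line.delim   = " + serde.get("line.delim"));
      System.out.println("escape.delim = " + serde.get("escape.delim"));
    } finally {
      client.close();
    }
  }
}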