[ https://issues.apache.org/jira/browse/CARBONDATA-3950?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ]

Chetan Bhat closed CARBONDATA-3950.
-----------------------------------
    Fix Version/s: 2.1.0
       Resolution: Fixed

Issue is fixed in the latest Carbon 2.1.0 build.
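
For reference, a minimal re-verification sketch against a 2.1.0 build (reduced schema; the table name and statements follow the report quoted below, and the clean completion of the drop is the expected outcome of the fix rather than a captured log):

drop table if exists uniqdata_int;
CREATE TABLE uniqdata_int (CUST_NAME string, DOB timestamp, BIGINT_COLUMN1 bigint)
  Partitioned by (cust_id int) stored as carbondata;
alter table uniqdata_int add columns(id int);
-- dropping a non-partition column should now complete without ProcessMetaDataException
alter table uniqdata_int drop columns(CUST_NAME);
-- CUST_NAME should no longer be listed; the partition column cust_id is retained
desc uniqdata_int;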

> Alter table drop column for non partition column throws error
> -------------------------------------------------------------
>
>                 Key: CARBONDATA-3950
>                 URL: https://issues.apache.org/jira/browse/CARBONDATA-3950
>             Project: CarbonData
>          Issue Type: Bug
>          Components: data-query
>    Affects Versions: 2.0.1
>         Environment: Spark 2.4.5
>            Reporter: Chetan Bhat
>            Priority: Minor
>             Fix For: 2.1.0
>
>
> From spark-sql the queries are executed as mentioned below-
> drop table if exists uniqdata_int;
> CREATE TABLE uniqdata_int (CUST_NAME String, ACTIVE_EMUI_VERSION string, DOB timestamp, DOJ timestamp, BIGINT_COLUMN1 bigint, BIGINT_COLUMN2 bigint, DECIMAL_COLUMN1 decimal(30,10), DECIMAL_COLUMN2 decimal(36,10), Double_COLUMN1 double, Double_COLUMN2 double, INTEGER_COLUMN1 int) Partitioned by (cust_id int) stored as carbondata TBLPROPERTIES ("TABLE_BLOCKSIZE"="256 MB");
> LOAD DATA INPATH 'hdfs://hacluster/chetan/2000_UniqData.csv' into table uniqdata_int partition(cust_id='1') OPTIONS ('FILEHEADER'='CUST_ID,CUST_NAME,ACTIVE_EMUI_VERSION,DOB,DOJ,BIGINT_COLUMN1,BIGINT_COLUMN2,DECIMAL_COLUMN1,DECIMAL_COLUMN2,Double_COLUMN1,Double_COLUMN2,INTEGER_COLUMN1','BAD_RECORDS_ACTION'='FORCE');
> show partitions uniqdata_int;
> select * from uniqdata_int order by cust_id;
> alter table uniqdata_int add columns(id int);
> desc uniqdata_int;
> *alter table uniqdata_int drop columns(CUST_NAME);*
> desc uniqdata_int;
> Issue : Alter table drop column for a non-partition column throws an error even though the operation is successful.
> org.apache.carbondata.spark.exception.ProcessMetaDataException: operation failed for priyesh.uniqdata_int: Alter table drop column operation failed: org.apache.hadoop.hive.ql.metadata.HiveException: Unable to alter table. The following columns have types incompatible with the existing columns in their respective positions :
> col;
>  at org.apache.spark.sql.execution.command.MetadataProcessOperation$class.throwMetadataException(package.
>  at org.apache.spark.sql.execution.command.MetadataCommand.throwMetadataException(package.scala:120)
>  at org.apache.spark.sql.execution.command.schema.CarbonAlterTableDropColumnCommand.processMetadata(CarbonAlterTableDropColumnCommand.scala:201)
>  at org.apache.spark.sql.execution.command.MetadataCommand$$anonfun$run$1.apply(package.scala:123)
>  at org.apache.spark.sql.execution.command.MetadataCommand$$anonfun$run$1.apply(package.scala:123)
>  at org.apache.spark.sql.execution.command.Auditable$class.runWithAudit(package.scala:104)
>  at org.apache.spark.sql.execution.command.MetadataCommand.runWithAudit(package.scala:120)
>  at org.apache.spark.sql.execution.command.MetadataCommand.run(package.scala:123)
>  at org.apache.spark.sql.execution.command.ExecutedCommandExec.sideEffectResult$lzycompute(commands.scala
>  at org.apache.spark.sql.execution.command.ExecutedCommandExec.sideEffectResult(commands.scala:69)
>  at org.apache.spark.sql.execution.command.ExecutedCommandExec.executeCollect(commands.scala:80)
>  at org.apache.spark.sql.Dataset$$anonfun$6.apply(Dataset.scala:196)
>  at org.apache.spark.sql.Dataset$$anonfun$6.apply(Dataset.scala:196)
>  at org.apache.spark.sql.Dataset$$anonfun$52.apply(Dataset.scala:3379)
>  at org.apache.spark.sql.execution.SQLExecution$$anonfun$withNewExecutionId$1.apply(SQLExecution.scala:95
>  at org.apache.spark.sql.execution.SQLExecution$.withSQLConfPropagated(SQLExecution.scala:144)
>  at org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:86)
>  at org.apache.spark.sql.Dataset.withAction(Dataset.scala:3378)
>  at org.apache.spark.sql.Dataset.<init>(Dataset.scala:196)
>  at org.apache.spark.sql.Dataset$.ofRows(Dataset.scala:79)
>  at org.apache.spark.sql.SparkSession.sql(SparkSession.scala:651)
>  at org.apache.spark.sql.SQLContext.sql(SQLContext.scala:694)
>  at org.apache.spark.sql.hive.thriftserver.SparkSQLDriver.run(SparkSQLDriver.scala:67)
>  at org.apache.spark.sql.hive.thriftserver.SparkSQLCLIDriver.processCmd(SparkSQLCLIDriver.scala:387)
>  at org.apache.hadoop.hive.cli.CliDriver.processLine(CliDriver.java:406)
>  at org.apache.spark.sql.hive.thriftserver.SparkSQLCLIDriver$.main(SparkSQLCLIDriver.scala:279)
>  at org.apache.spark.sql.hive.thriftserver.SparkSQLCLIDriver.main(SparkSQLCLIDriver.scala)
>  at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
>  at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
>  at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
>  at java.lang.reflect.Method.invoke(Method.java:498)
>  at org.apache.spark.deploy.JavaMainApplication.start(SparkApplication.scala:52)
>  at org.apache.spark.deploy.SparkSubmit.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:87
>  at org.apache.spark.deploy.SparkSubmit.doRunMain$1(SparkSubmit.scala:164)
>  at org.apache.spark.deploy.SparkSubmit.submit(SparkSubmit.scala:187)
>  at org.apache.spark.deploy.SparkSubmit.doSubmit(SparkSubmit.scala:89)
>  at org.apache.spark.deploy.SparkSubmit$$anon$2.doSubmit(SparkSubmit.scala:951)
>  at org.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:960)
>  at org.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)
>  
> spark-sql> desc uniqdata_int;
> active_emui_version string NULL
> dob timestamp NULL
> doj timestamp NULL
> bigint_column1 bigint NULL
> bigint_column2 bigint NULL
> decimal_column1 decimal(30,10) NULL
> decimal_column2 decimal(36,10) NULL
> double_column1 double NULL
> double_column2 double NULL
> integer_column1 int NULL
> id int
> cust_id int NULL
> # Partition Information
> # col_name data_type comment
> cust_id int NULL
> Time taken: 0.126 seconds, Fetched 15 row(s)



--
This message was sent by Atlassian Jira
(v8.3.4#803005)
