[ 
https://issues.apache.org/jira/browse/CARBONDATA-1849?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Kunal Kapoor reassigned CARBONDATA-1849:
----------------------------------------

    Assignee: Kunal Kapoor

> Create pre-aggregate table failed
> ---------------------------------
>
>                 Key: CARBONDATA-1849
>                 URL: https://issues.apache.org/jira/browse/CARBONDATA-1849
>             Project: CarbonData
>          Issue Type: Bug
>          Components: spark-integration
>            Reporter: Mohammad Shahid Khan
>            Assignee: Kunal Kapoor
>
> Creating a pre-aggregate table fails:
>     spark.sql("""create table carbontable (c1 string, c2 int, c3 string, c5 string) STORED BY 'org.apache.carbondata.format'""")
>     spark.sql("insert into carbontable select 'a',1,'aa','aaa'")
>     spark.sql("insert into carbontable select 'b',1,'aa','aaa'")
>     spark.sql("insert into carbontable select 'a',10,'aa','aaa'")
>     spark.sql("create datamap preagg1 on table carbontable using 'preaggregate' as select c1, sum(c2) from carbontable group by c1")
> Exception:
> {code}
> Exception in thread "main" org.apache.carbondata.processing.loading.exception.CarbonDataLoadingException: CSV header in DDL is not proper. Column names in schema and CSV header are not the same.
>       at org.apache.carbondata.spark.util.CommonUtil$.getCsvHeaderColumns(CommonUtil.scala:637)
>       at org.apache.carbondata.spark.util.DataLoadingUtil$.buildCarbonLoadModel(DataLoadingUtil.scala:312)
>       at org.apache.spark.sql.execution.command.management.CarbonLoadDataCommand.processData(CarbonLoadDataCommand.scala:133)
>       at org.apache.spark.sql.execution.command.DataCommand.run(package.scala:71)
>       at org.apache.spark.sql.execution.command.preaaggregate.CreatePreAggregateTableCommand.processData(CreatePreAggregateTableCommand.scala:139)
>       at org.apache.spark.sql.execution.command.datamap.CarbonCreateDataMapCommand.processData(CarbonCreateDataMapCommand.scala:80)
>       at org.apache.spark.sql.execution.command.AtomicRunnableCommand.run(package.scala:86)
>       at org.apache.spark.sql.execution.command.ExecutedCommandExec.sideEffectResult$lzycompute(commands.scala:58)
>       at org.apache.spark.sql.execution.command.ExecutedCommandExec.sideEffectResult(commands.scala:56)
>       at org.apache.spark.sql.execution.command.ExecutedCommandExec.doExecute(commands.scala:74)
>       at org.apache.spark.sql.execution.SparkPlan$$anonfun$execute$1.apply(SparkPlan.scala:114)
>       at org.apache.spark.sql.execution.SparkPlan$$anonfun$execute$1.apply(SparkPlan.scala:114)
>       at org.apache.spark.sql.execution.SparkPlan$$anonfun$executeQuery$1.apply(SparkPlan.scala:135)
>       at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
>       at org.apache.spark.sql.execution.SparkPlan.executeQuery(SparkPlan.scala:132)
>       at org.apache.spark.sql.execution.SparkPlan.execute(SparkPlan.scala:113)
>       at org.apache.spark.sql.execution.QueryExecution.toRdd$lzycompute(QueryExecution.scala:87)
>       at org.apache.spark.sql.execution.QueryExecution.toRdd(QueryExecution.scala:87)
>       at org.apache.spark.sql.Dataset.<init>(Dataset.scala:185)
>       at org.apache.spark.sql.Dataset$.ofRows(Dataset.scala:64)
>       at org.apache.spark.sql.SparkSession.sql(SparkSession.scala:592)
>       at org.apache.carbondata.examples.CarbonSessionExample$.main(CarbonSessionExample.scala:35)
>       at org.apache.carbondata.examples.CarbonSessionExample.main(CarbonSessionExample.scala)
>       at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
>       at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
>       at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
>       at java.lang.reflect.Method.invoke(Method.java:498)
>       at com.intellij.rt.execution.application.AppMain.main(AppMain.java:147)
> {code}
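>
> For reference, a minimal self-contained sketch of the reproduction above. The session bootstrap follows the pattern of CarbonSessionExample (the getOrCreateCarbonSession builder from org.apache.spark.sql.CarbonSession); the local master, app name, object name, and store path are illustrative placeholders, not part of the original report:
> {code}
> // Standalone reproduction sketch. Assumes the CarbonSession builder is available
> // via the CarbonSession._ import; "/tmp/carbon.store" is a placeholder store path.
> import org.apache.spark.sql.SparkSession
> import org.apache.spark.sql.CarbonSession._
>
> object PreAggCreateRepro {
>   def main(args: Array[String]): Unit = {
>     val spark = SparkSession
>       .builder()
>       .master("local[2]")
>       .appName("PreAggCreateRepro")
>       .getOrCreateCarbonSession("/tmp/carbon.store")
>
>     spark.sql("drop table if exists carbontable")
>     spark.sql(
>       """create table carbontable (c1 string, c2 int, c3 string, c5 string)
>         |STORED BY 'org.apache.carbondata.format'""".stripMargin)
>     spark.sql("insert into carbontable select 'a',1,'aa','aaa'")
>     spark.sql("insert into carbontable select 'b',1,'aa','aaa'")
>     spark.sql("insert into carbontable select 'a',10,'aa','aaa'")
>
>     // This statement triggers the CarbonDataLoadingException above
>     // ("CSV header in DDL is not proper ...") while loading the aggregate table.
>     spark.sql(
>       """create datamap preagg1 on table carbontable using 'preaggregate'
>         |as select c1, sum(c2) from carbontable group by c1""".stripMargin)
>
>     spark.stop()
>   }
> }
> {code}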



