[ https://issues.apache.org/jira/browse/CARBONDATA-902?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ]

Neha Bhardwaj updated CARBONDATA-902:
-------------------------------------
    Description: 
The decimal data type raises an exception while selecting data from the table in Hive.

Steps to reproduce:
1) In Spark Shell:

 a) Create Table -
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.CarbonSession._

val carbon = SparkSession.builder().config(sc.getConf).getOrCreateCarbonSession("hdfs://localhost:54310/opt/data")
 
 scala> carbon.sql(""" create table testHive1(id int, name string, dob timestamp, experience decimal, salary double, incentive bigint) stored by 'carbondata' """).show
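
For reference, a bare decimal resolves to decimal(10,0) in Spark SQL, so this table stores decimal(10,0) values; the schema actually created can be confirmed with:

 scala> carbon.sql(""" describe testHive1 """).show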

 b) Load Data - 
scala> carbon.sql(""" load data inpath 'hdfs://localhost:54310/Files/testHive1.csv' into table testHive1 """).show
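
The attached testHive1.csv should hold one value per column of the schema above. A hypothetical row, for illustration only (assuming the default yyyy-MM-dd HH:mm:ss timestamp format):

1,Anu,2016-04-14 15:00:09,5,50000.0,1000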


2) In Hive:

 a) Add Jars - 
add jar /home/neha/incubator-carbondata/assembly/target/scala-2.11/carbondata_2.11-1.1.0-incubating-SNAPSHOT-shade-hadoop2.7.2.jar;
add jar /opt/spark-2.1.0-bin-hadoop2.7/jars/spark-catalyst_2.11-2.1.0.jar;
add jar /home/neha/incubator-carbondata/integration/hive/carbondata-hive-1.1.0-incubating-SNAPSHOT.jar;
 
 
 b) Create Table -
create table testHive1(id int, name string, dob timestamp, experience decimal, salary double, incentive bigint);

c) Alter location - 
hive> alter table testHive1 set LOCATION 'hdfs://localhost:54310/opt/data/default/testhive1';
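(This path follows the Carbon store layout <store-path>/<database>/<table>: the store path passed to getOrCreateCarbonSession, then the default database, then the lowercased table name.)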

 d) Set Properties - 
set hive.mapred.supports.subdirectories=true;
set mapreduce.input.fileinputformat.input.dir.recursive=true;

e) Alter FileFormat -
alter table testHive1 set FILEFORMAT
INPUTFORMAT "org.apache.carbondata.hive.MapredCarbonInputFormat"
OUTPUTFORMAT "org.apache.carbondata.hive.MapredCarbonOutputFormat"
SERDE "org.apache.carbondata.hive.CarbonHiveSerDe";
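
Before querying, an optional sanity check that the location and format changes took effect:

hive> describe formatted testHive1;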

 f) Execute Queries - 
select * from testHive1;

3) Query:
hive> select * from testHive1;

Expected Output:
The result set should display all the data present in the table.

Result:
Exception in thread "[main][partitionID:testhive1;queryID:8945394553892]" java.lang.NoClassDefFoundError: org/apache/spark/sql/types/Decimal
        at org.apache.carbondata.core.scan.collector.impl.AbstractScannedResultCollector.getMeasureData(AbstractScannedResultCollector.java:109)
        at org.apache.carbondata.core.scan.collector.impl.AbstractScannedResultCollector.fillMeasureData(AbstractScannedResultCollector.java:78)
        at org.apache.carbondata.core.scan.collector.impl.DictionaryBasedResultCollector.fillMeasureData(DictionaryBasedResultCollector.java:158)
        at org.apache.carbondata.core.scan.collector.impl.DictionaryBasedResultCollector.collectData(DictionaryBasedResultCollector.java:115)
        at org.apache.carbondata.core.scan.processor.impl.DataBlockIteratorImpl.next(DataBlockIteratorImpl.java:51)
        at org.apache.carbondata.core.scan.processor.impl.DataBlockIteratorImpl.next(DataBlockIteratorImpl.java:32)
        at org.apache.carbondata.core.scan.result.iterator.DetailQueryResultIterator.getBatchResult(DetailQueryResultIterator.java:50)
        at org.apache.carbondata.core.scan.result.iterator.DetailQueryResultIterator.next(DetailQueryResultIterator.java:41)
        at org.apache.carbondata.core.scan.result.iterator.DetailQueryResultIterator.next(DetailQueryResultIterator.java:31)
        at org.apache.carbondata.core.scan.result.iterator.ChunkRowIterator.<init>(ChunkRowIterator.java:41)
        at org.apache.carbondata.hive.CarbonHiveRecordReader.initialize(CarbonHiveRecordReader.java:84)
        at org.apache.carbondata.hive.CarbonHiveRecordReader.<init>(CarbonHiveRecordReader.java:66)
        at org.apache.carbondata.hive.MapredCarbonInputFormat.getRecordReader(MapredCarbonInputFormat.java:68)
        at org.apache.hadoop.hive.ql.exec.FetchOperator$FetchInputFormatSplit.getRecordReader(FetchOperator.java:673)
        at org.apache.hadoop.hive.ql.exec.FetchOperator.getRecordReader(FetchOperator.java:323)
        at org.apache.hadoop.hive.ql.exec.FetchOperator.getNextRow(FetchOperator.java:445)
        at org.apache.hadoop.hive.ql.exec.FetchOperator.pushRow(FetchOperator.java:414)
        at org.apache.hadoop.hive.ql.exec.FetchTask.fetch(FetchTask.java:140)
        at org.apache.hadoop.hive.ql.Driver.getResults(Driver.java:1670)
        at org.apache.hadoop.hive.cli.CliDriver.processLocalCmd(CliDriver.java:233)
        at org.apache.hadoop.hive.cli.CliDriver.processCmd(CliDriver.java:165)
        at org.apache.hadoop.hive.cli.CliDriver.processLine(CliDriver.java:376)
        at org.apache.hadoop.hive.cli.CliDriver.executeDriver(CliDriver.java:736)
        at org.apache.hadoop.hive.cli.CliDriver.run(CliDriver.java:681)
        at org.apache.hadoop.hive.cli.CliDriver.main(CliDriver.java:621)
        at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
        at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
        at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
        at java.lang.reflect.Method.invoke(Method.java:497)
        at org.apache.hadoop.util.RunJar.run(RunJar.java:221)
        at org.apache.hadoop.util.RunJar.main(RunJar.java:136)
Caused by: java.lang.ClassNotFoundException: org.apache.spark.sql.types.Decimal
        at java.net.URLClassLoader.findClass(URLClassLoader.java:381)
        at java.lang.ClassLoader.loadClass(ClassLoader.java:424)
        at java.lang.ClassLoader.loadClass(ClassLoader.java:357)
        ... 31 more
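
Note: org.apache.spark.sql.types.Decimal ships in the spark-catalyst jar added in step 2 a), so the error suggests that jar is not visible to the classloader running the fetch task, not that the class is absent from the environment. A first diagnostic (a guess, not a confirmed fix) is to verify that all three jars are actually registered in the failing Hive session:

hive> list jars;

If all three are listed and the error persists, the read path itself (AbstractScannedResultCollector.getMeasureData) appears to require a Spark class from inside Hive, which points at the CarbonData hive integration rather than the session setup.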
 

> NoClassDefFoundError for Decimal datatype during select queries
> ---------------------------------------------------------------
>
>                 Key: CARBONDATA-902
>                 URL: https://issues.apache.org/jira/browse/CARBONDATA-902
>             Project: CarbonData
>          Issue Type: Bug
>          Components: data-query
>         Environment: Spark 2.1, Hive 1.2.1
>            Reporter: Neha Bhardwaj
>            Assignee: Rahul Kumar
>            Priority: Minor
>         Attachments: testHive1.csv
>



--
This message was sent by Atlassian JIRA
(v6.3.15#6346)
