[
https://issues.apache.org/jira/browse/CARBONDATA-693?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
]
SWATI RAO closed CARBONDATA-693.
--------------------------------
Resolution: Fixed
This issue is invalid.
> Issue in select query for function Ceiling & Floor
> --------------------------------------------------
>
> Key: CARBONDATA-693
> URL: https://issues.apache.org/jira/browse/CARBONDATA-693
> Project: CarbonData
> Issue Type: Bug
> Components: sql
> Affects Versions: 1.0.0-incubating
> Environment: Spark1.6
> Reporter: SWATI RAO
> Priority: Trivial
> Attachments: Test_Data1.csv
>
>
> When executed in Hive the query works fine, but when we execute the same
> query in CarbonData it gives an error.
> Test case: PushUP_FILTER_Test_Boundary_TC059
> Hive:
> 0: jdbc:hive2://hadoop-master:10000> create table Test_Boundary2 (c1_int
> int,c2_Bigint Bigint,c3_Decimal Decimal(38,30),c4_double double,c5_string
> string,c6_Timestamp Timestamp,c7_Datatype_Desc string) ;
> +---------+--+
> | result |
> +---------+--+
> +---------+--+
> No rows selected (0.101 seconds)
> 0: jdbc:hive2://hadoop-master:10000> show tables ;
> +-----------------+--------------+--+
> | tableName | isTemporary |
> +-----------------+--------------+--+
> | test_boundary | false |
> | test_boundary2 | false |
> +-----------------+--------------+--+
> 2 rows selected (0.02 seconds)
> 0: jdbc:hive2://hadoop-master:10000> load data local inpath
> '/home/server/Desktop/Test_Data1.csv' into table test_boundary2 ;
> +---------+--+
> | Result |
> +---------+--+
> +---------+--+
> No rows selected (0.201 seconds)
> 0: jdbc:hive2://hadoop-master:10000> select c3_Decimal from Test_Boundary
> where floor(c3_Decimal)=0.00 or floor(c3_Decimal) IS NULL ;
> +-------------+--+
> | c3_Decimal |
> +-------------+--+
> +-------------+--+
> No rows selected (0.315 seconds)
> 0: jdbc:hive2://hadoop-master:10000> select count(*) from Test_Boundary ;
> +------+--+
> | _c0 |
> +------+--+
> | 0 |
> +------+--+
> CarbonData:
> 0: jdbc:hive2://hadoop-master:10000> create table Test_Boundary (c1_int
> int,c2_Bigint Bigint,c3_Decimal Decimal(38,30),c4_double double,c5_string
> string,c6_Timestamp Timestamp,c7_Datatype_Desc string) STORED BY
> 'org.apache.carbondata.format' ;
> +---------+--+
> | Result |
> +---------+--+
> +---------+--+
> No rows selected (0.535 seconds)
> 0: jdbc:hive2://hadoop-master:10000> select count(*) from Test_Boundary ;
> +------+--+
> | _c0 |
> +------+--+
> | 0 |
> +------+--+
> 1 row selected (1.883 seconds)
>
> hdfs://192.168.2.145:54310hdfs://192.168.2.145:54310/HDFS_URL/BabuStore/Data/Test_Data1.csv
> (state=,code=0)
> 0: jdbc:hive2://hadoop-master:10000> LOAD DATA INPATH
> 'hdfs://192.168.2.145:54310/BabuStore/Data/Test_Data1.csv' INTO table
> Test_Boundary
> OPTIONS('DELIMITER'=',','QUOTECHAR'='','BAD_RECORDS_ACTION'='FORCE','FILEHEADER'='')
> ;
> +---------+--+
> | Result |
> +---------+--+
> +---------+--+
> No rows selected (2.436 seconds)
> 0: jdbc:hive2://hadoop-master:10000> select c3_Decimal from Test_Boundary
> where floor(c3_Decimal)=0.00 or floor(c3_Decimal) IS NULL ;
> Error: org.apache.spark.SparkException: Job aborted due to stage failure:
> Task 0 in stage 7.0 failed 4 times, most recent failure: Lost task 0.3 in
> stage 7.0 (TID 16, hadoop-master): org.apache.spark.sql.AnalysisException:
> Decimal scale (0) cannot be greater than precision (-28).;
> at org.apache.spark.sql.types.PrecisionInfo.<init>(DecimalType.scala:32)
> at org.apache.spark.sql.types.DecimalType.<init>(DecimalType.scala:68)
> at org.apache.spark.sql.types.DecimalType$.bounded(DecimalType.scala:155)
> at org.apache.spark.sql.types.Decimal.floor(Decimal.scala:326)
> at org.apache.spark.sql.catalyst.expressions.GeneratedClass$SpecificPredicate.eval(Unknown Source)
> at org.apache.spark.sql.catalyst.expressions.codegen.GeneratePredicate$$anonfun$create$2.apply(GeneratePredicate.scala:68)
> at org.apache.spark.sql.catalyst.expressions.codegen.GeneratePredicate$$anonfun$create$2.apply(GeneratePredicate.scala:68)
> at org.apache.spark.sql.execution.Filter$$anonfun$2$$anonfun$apply$2.apply(basicOperators.scala:74)
> at org.apache.spark.sql.execution.Filter$$anonfun$2$$anonfun$apply$2.apply(basicOperators.scala:72)
> at scala.collection.Iterator$$anon$14.hasNext(Iterator.scala:390)
> at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:327)
> at scala.collection.Iterator$class.foreach(Iterator.scala:727)
> at scala.collection.AbstractIterator.foreach(Iterator.scala:1157)
> at scala.collection.generic.Growable$class.$plus$plus$eq(Growable.scala:48)
> at scala.collection.mutable.ArrayBuffer.$plus$plus$eq(ArrayBuffer.scala:103)
> at scala.collection.mutable.ArrayBuffer.$plus$plus$eq(ArrayBuffer.scala:47)
> at scala.collection.TraversableOnce$class.to(TraversableOnce.scala:273)
> at scala.collection.AbstractIterator.to(Iterator.scala:1157)
> at scala.collection.TraversableOnce$class.toBuffer(TraversableOnce.scala:265)
> at scala.collection.AbstractIterator.toBuffer(Iterator.scala:1157)
> at scala.collection.TraversableOnce$class.toArray(TraversableOnce.scala:252)
> at scala.collection.AbstractIterator.toArray(Iterator.scala:1157)
> at org.apache.spark.rdd.RDD$$anonfun$collect$1$$anonfun$12.apply(RDD.scala:927)
> at org.apache.spark.rdd.RDD$$anonfun$collect$1$$anonfun$12.apply(RDD.scala:927)
> at org.apache.spark.SparkContext$$anonfun$runJob$5.apply(SparkContext.scala:1858)
> at org.apache.spark.SparkContext$$anonfun$runJob$5.apply(SparkContext.scala:1858)
> at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:66)
> at org.apache.spark.scheduler.Task.run(Task.scala:89)
> at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:227)
> at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
> at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
> at java.lang.Thread.run(Thread.java:745)
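> For reference, the negative precision in the message can be reproduced from
> the result-type arithmetic named in the stack trace. Below is a minimal Scala
> sketch, assuming Spark 1.6 derives the type of floor(decimal) as
> DecimalType.bounded(precision - scale + 1, 0) (per the Decimal.floor ->
> DecimalType$.bounded -> PrecisionInfo frames above); FloorPrecisionSketch and
> floorResultPrecision are hypothetical names, not Spark API:
>
>   object FloorPrecisionSketch {
>     val MaxPrecision = 38 // Spark's DecimalType.MAX_PRECISION
>
>     // Mirrors DecimalType.bounded(precision - scale + 1, 0); PrecisionInfo
>     // then rejects any result whose scale exceeds its precision.
>     def floorResultPrecision(precision: Int, scale: Int): Int =
>       math.min(precision - scale + 1, MaxPrecision)
>
>     def main(args: Array[String]): Unit = {
>       // Declared column type Decimal(38,30): result precision 9, valid.
>       println(floorResultPrecision(38, 30)) // 9
>
>       // A value surfaced as Decimal(1,30) (precision 1, declared scale 30)
>       // yields the number in the reported error:
>       println(floorResultPrecision(1, 30)) // -28
>     }
>   }
>
> Under that assumption, -28 corresponds to a value carrying precision 1 with
> the declared scale 30, which would point at the precision reported for the
> stored decimal values rather than at floor itself. A possible workaround
> (untested) is floor(cast(c3_Decimal as double)), which avoids Decimal.floor
> entirely.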
> Other affected test queries:
> PushUP_FILTER_Test_Boundary_TC065: select c3_Decimal from Test_Boundary where floor(c3_Decimal)<=0.0 or floor(c3_Decimal) IS NOT NULL
> select c3_Decimal from Test_Boundary where ceil(c3_Decimal)=0.0 or ceiling(c3_Decimal) IS NULL
> PushUP_FILTER_Test_Boundary_TC085: select c3_Decimal from Test_Boundary where ceil(c3_Decimal)<=0.0 or ceiling(c3_Decimal) IS NOT NULL
> PushUP_FILTER_Test_Boundary_TC088: select min(c1_int), max(c1_int), sum(c1_int), avg(c1_int), count(c1_int), variance(c1_int) from Test_Boundary where rand(c1_int)=0.6201007799387834 or rand(c1_int)=0.45540022789662593