Akash R Nilugal created CARBONDATA-2031:
-------------------------------------------

             Summary: Select with an IS NULL filter on a NO_INVERTED_INDEX column throws java.lang.ArrayIndexOutOfBoundsException
                 Key: CARBONDATA-2031
                 URL: https://issues.apache.org/jira/browse/CARBONDATA-2031
             Project: CarbonData
          Issue Type: Bug
            Reporter: Akash R Nilugal
            Assignee: Akash R Nilugal
         Attachments: dest.csv

Steps to reproduce:

1) create table zerorows_part (c1 string,c2 int,c3 string,c5 string) STORED BY 'carbondata' TBLPROPERTIES('DICTIONARY_INCLUDE'='C2','NO_INVERTED_INDEX'='C2')

2) LOAD DATA LOCAL INPATH '$filepath/dest.csv' INTO table zerorows_part OPTIONS('delimiter'=',','fileheader'='c1,c2,c3,c5')

3) select c2 from zerorows_part where c2 is null
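
For convenience, the three steps can be run back to back as a single script; a minimal sketch follows. The '/tmp/dest.csv' path is a placeholder for the attached file, and the sample row in the comments is only an assumption about its shape; all the reproduction needs is that some rows leave c2 empty so that the IS NULL predicate matches.

{code:sql}
-- Step 1: c2 is dictionary-encoded and excluded from the inverted index
CREATE TABLE zerorows_part (c1 STRING, c2 INT, c3 STRING, c5 STRING)
STORED BY 'carbondata'
TBLPROPERTIES ('DICTIONARY_INCLUDE'='C2', 'NO_INVERTED_INDEX'='C2');

-- Step 2: load the attached CSV ('/tmp/dest.csv' is a placeholder path;
-- assumed row shape, with c2 left empty on some rows: a,,b,c)
LOAD DATA LOCAL INPATH '/tmp/dest.csv' INTO TABLE zerorows_part
OPTIONS ('delimiter'=',', 'fileheader'='c1,c2,c3,c5');

-- Step 3: filtering the NO_INVERTED_INDEX column with IS NULL fails with
-- java.lang.ArrayIndexOutOfBoundsException: 0 during the scan
SELECT c2 FROM zerorows_part WHERE c2 IS NULL;
{code}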

 

{code}
Previous exception in task: java.util.concurrent.ExecutionException: java.lang.ArrayIndexOutOfBoundsException: 0
    org.apache.carbondata.core.scan.processor.AbstractDataBlockIterator.updateScanner(AbstractDataBlockIterator.java:136)
    org.apache.carbondata.core.scan.processor.impl.DataBlockIteratorImpl.processNextBatch(DataBlockIteratorImpl.java:64)
    org.apache.carbondata.core.scan.result.iterator.VectorDetailQueryResultIterator.processNextBatch(VectorDetailQueryResultIterator.java:46)
    org.apache.carbondata.spark.vectorreader.VectorizedCarbonRecordReader.nextBatch(VectorizedCarbonRecordReader.java:283)
    org.apache.carbondata.spark.vectorreader.VectorizedCarbonRecordReader.nextKeyValue(VectorizedCarbonRecordReader.java:171)
    org.apache.carbondata.spark.rdd.CarbonScanRDD$$anon$1.hasNext(CarbonScanRDD.scala:370)
    org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIterator.scan_nextBatch$(Unknown Source)
    org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIterator.processNext(Unknown Source)
    org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
    org.apache.spark.sql.execution.WholeStageCodegenExec$$anonfun$8$$anon$1.hasNext(WholeStageCodegenExec.scala:395)
    org.apache.spark.sql.execution.SparkPlan$$anonfun$2.apply(SparkPlan.scala:234)
    org.apache.spark.sql.execution.SparkPlan$$anonfun$2.apply(SparkPlan.scala:228)
    org.apache.spark.rdd.RDD$$anonfun$mapPartitionsInternal$1$$anonfun$apply$25.apply(RDD.scala:827)
    org.apache.spark.rdd.RDD$$anonfun$mapPartitionsInternal$1$$anonfun$apply$25.apply(RDD.scala:827)
    org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
    org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:323)
    org.apache.spark.rdd.RDD.iterator(RDD.scala:287)
    org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:87)
    org.apache.spark.scheduler.Task.run(Task.scala:108)
    org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:338)
    java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
    java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
    java.lang.Thread.run(Thread.java:748)
    at org.apache.spark.TaskContextImpl.invokeListeners(TaskContextImpl.scala:138)
    at org.apache.spark.TaskContextImpl.markTaskCompleted(TaskContextImpl.scala:116)
    at org.apache.spark.scheduler.Task.run(Task.scala:118)
    at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:338)
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
    at java.lang.Thread.run(Thread.java:748)
{code}

 

 

[^dest.csv]
