[ https://issues.apache.org/jira/browse/CARBONDATA-3846?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ]

Ajantha Bhat resolved CARBONDATA-3846.
--------------------------------------
    Fix Version/s: 2.1.0
       Resolution: Fixed

> Dataload fails for boolean column configured as BUCKET_COLUMNS
> --------------------------------------------------------------
>
>                 Key: CARBONDATA-3846
>                 URL: https://issues.apache.org/jira/browse/CARBONDATA-3846
>             Project: CarbonData
>          Issue Type: Bug
>          Components: data-query
>    Affects Versions: 2.0.0
>         Environment: Spark 2.3.2, Spark 2.4.5
>            Reporter: Chetan Bhat
>            Priority: Minor
>             Fix For: 2.1.0
>
>          Time Spent: 50m
>  Remaining Estimate: 0h
>
> *Steps-*
> 0: jdbc:hive2://10.20.255.171:23040/default> create table if not exists 
> all_data_types1(*bool_1 boolean*,bool_2 boolean,chinese string,Number 
> int,smallNumber smallint,BigNumber bigint,LargeDecimal double,smalldecimal 
> float,customdecimal decimal(38,15),words string,smallwords char(8),varwords 
> varchar(20),time timestamp,day date,emptyNumber int,emptysmallNumber 
> smallint,emptyBigNumber bigint,emptyLargeDecimal double,emptysmalldecimal 
> float,emptycustomdecimal decimal(38,38),emptywords string,emptysmallwords 
> char(8),emptyvarwords varchar(20)) stored as carbondata TBLPROPERTIES 
> ('BUCKET_NUMBER'='1', '*BUCKET_COLUMNS'='bool_1*');
> +---------+
> | Result  |
> +---------+
> +---------+
> No rows selected (0.939 seconds)
>  0: jdbc:hive2://10.20.255.171:23040/default> LOAD DATA INPATH 
> 'hdfs://hacluster/chetan/datafile_0.csv' into table all_data_types1 
> OPTIONS('DELIMITER'=',' , 
> 'QUOTECHAR'='"','BAD_RECORDS_ACTION'='FORCE','FILEHEADER'='bool_1 ,bool_2 
> ,chinese ,Number ,smallNumber ,BigNumber ,LargeDecimal ,smalldecimal 
> ,customdecimal,words ,smallwords ,varwords ,time ,day ,emptyNumber 
> ,emptysmallNumber ,emptyBigNumber ,emptyLargeDecimal 
> ,emptysmalldecimal,emptycustomdecimal ,emptywords ,emptysmallwords 
> ,emptyvarwords');
>  *Error: java.lang.Exception: DataLoad failure: (state=,code=0)*
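> 
> A minimal sketch of the same scenario (hypothetical table and file names, columns trimmed to the boolean bucket column plus one string column), assuming the failure is triggered by BUCKET_COLUMNS referencing a boolean column rather than by the wide schema:
> create table if not exists bucket_bool_min(bool_1 boolean, words string)
> stored as carbondata
> TBLPROPERTIES ('BUCKET_NUMBER'='1', 'BUCKET_COLUMNS'='bool_1');
> LOAD DATA INPATH 'hdfs://hacluster/chetan/bool_min.csv' into table bucket_bool_min
> OPTIONS('DELIMITER'=',', 'FILEHEADER'='bool_1,words');
> -- on 2.0.0 this is expected to fail with: java.lang.Exception: DataLoad failure: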
>  
> *Log-*
> java.lang.Exception: DataLoad failure: 
>  at 
> org.apache.carbondata.spark.rdd.CarbonDataRDDFactory$.loadCarbonData(CarbonDataRDDFactory.scala:560)
>  at 
> org.apache.spark.sql.execution.command.management.CarbonLoadDataCommand.loadData(CarbonLoadDataCommand.scala:207)
>  at 
> org.apache.spark.sql.execution.command.management.CarbonLoadDataCommand.processData(CarbonLoadDataCommand.scala:168)
>  at 
> org.apache.spark.sql.execution.command.AtomicRunnableCommand$$anonfun$run$3.apply(package.scala:148)
>  at 
> org.apache.spark.sql.execution.command.AtomicRunnableCommand$$anonfun$run$3.apply(package.scala:145)
>  at 
> org.apache.spark.sql.execution.command.Auditable$class.runWithAudit(package.scala:104)
>  at 
> org.apache.spark.sql.execution.command.AtomicRunnableCommand.runWithAudit(package.scala:141)
>  at 
> org.apache.spark.sql.execution.command.AtomicRunnableCommand.run(package.scala:145)
>  at 
> org.apache.spark.sql.execution.command.ExecutedCommandExec.sideEffectResult$lzycompute(commands.scala:70)
>  at 
> org.apache.spark.sql.execution.command.ExecutedCommandExec.sideEffectResult(commands.scala:68)
>  at 
> org.apache.spark.sql.execution.command.ExecutedCommandExec.executeCollect(commands.scala:79)
>  at org.apache.spark.sql.Dataset$$anonfun$6.apply(Dataset.scala:190)
>  at org.apache.spark.sql.Dataset$$anonfun$6.apply(Dataset.scala:190)
>  at org.apache.spark.sql.Dataset$$anonfun$52.apply(Dataset.scala:3259)
>  at 
> org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:77)
>  at org.apache.spark.sql.Dataset.withAction(Dataset.scala:3258)
>  at org.apache.spark.sql.Dataset.<init>(Dataset.scala:190)
>  at org.apache.spark.sql.Dataset$.ofRows(Dataset.scala:75)
>  at org.apache.spark.sql.SparkSession.sql(SparkSession.scala:642)
>  at org.apache.spark.sql.SQLContext.sql(SQLContext.scala:694)
>  at 
> org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation.org$apache$spark$sql$hive$thriftserver$SparkExecuteStatementOperation$$execute(SparkExecuteStatementOperation.scala:232)
>  at 
> org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$1$$anon$2.run(SparkExecuteStatementOperation.scala:175)
>  at 
> org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$1$$anon$2.run(SparkExecuteStatementOperation.scala:171)
>  at java.security.AccessController.doPrivileged(Native Method)
>  at javax.security.auth.Subject.doAs(Subject.java:422)
>  at 
> org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1698)
>  at 
> org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$1.run(SparkExecuteStatementOperation.scala:185)
>  at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)
>  at java.util.concurrent.FutureTask.run(FutureTask.java:266)
>  at 
> java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
>  at 
> java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
>  at java.lang.Thread.run(Thread.java:745)
>  2020-06-05 02:05:56,789 | ERROR | [HiveServer2-Background-Pool: Thread-138] 
> | Error running hive query: | 
> org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$1$$anon$2.run(SparkExecuteStatementOperation.scala:179)
>  org.apache.hive.service.cli.HiveSQLException: java.lang.Exception: DataLoad 
> failure: 
>  at 
> org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation.org$apache$spark$sql$hive$thriftserver$SparkExecuteStatementOperation$$execute(SparkExecuteStatementOperation.scala:269)
>  at 
> org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$1$$anon$2.run(SparkExecuteStatementOperation.scala:175)
>  at 
> org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$1$$anon$2.run(SparkExecuteStatementOperation.scala:171)
>  at java.security.AccessController.doPrivileged(Native Method)
>  at javax.security.auth.Subject.doAs(Subject.java:422)
>  at 
> org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1698)
>  at 
> org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$1.run(SparkExecuteStatementOperation.scala:185)
>  at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)
>  at java.util.concurrent.FutureTask.run(FutureTask.java:266)
>  at 
> java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
>  at 
> java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
>  at java.lang.Thread.run(Thread.java:745)
>  2020-06-05 02:05:56,792 | INFO | [HiveServer2-Handler-Pool: Thread-97] | 
> Asked to cancel job group 072ea11a-9b63-48c6-bed2-2f396ad82b36 | 
> org.apache.spark.internal.Logging$class.logInfo(Logging.scala:54)
>  
>  
> Data -
> true,false,这个AsdASD,1,1,1,1.0000000122399999025191164037096314132213592529296875,536.6699999999999590727384202182292938232421875,1.0000535685000000452049562227330170571804046630859375,RWEQWEQWEAAAAAAAAAAAAAAAAAAAAAAAA,563FXN1S,563FXN1SAAAAAAAAAAA,1970-01-02
>  05:01:00.001,1970-01-01,,,,,,,,,
> true,false,这个AsdASD,2,2,2,2.00000001224000012456372132874093949794769287109375,537.6699999999999590727384202182292938232421875,2.00005356850000026724956114776432514190673828125,SWEQWEQWEAAAAAAAAAAAAAAAAAAAAAAAA,663FXN1S,663FXN1SAAAAAAAAAAA,1970-01-02
>  05:01:00.001,1970-01-01,,,,,,,,,
> true,false,这个AsdASD,3,3,3,3.00000001224000012456372132874093949794769287109375,538.6699999999999590727384202182292938232421875,3.00005356850000026724956114776432514190673828125,TWEQWEQWEAAAAAAAAAAAAAAAAAAAAAAAA,763FXN1S,763FXN1SAAAAAAAAAAA,1970-01-02
>  05:01:00.001,1970-01-01,,,,,,,,,
> true,false,这个AsdASD,4,4,4,4.00000001224000012456372132874093949794769287109375,539.6699999999999590727384202182292938232421875,4.00005356850000026724956114776432514190673828125,UWEQWEQWEAAAAAAAAAAAAAAAAAAAAAAAA,863FXN1S,863FXN1SAAAAAAAAAAA,1970-01-02
>  05:01:00.001,1970-01-01,,,,,,,,,
> true,false,这个AsdASD,5,5,5,5.00000001224000012456372132874093949794769287109375,540.6699999999999590727384202182292938232421875,5.00005356850000026724956114776432514190673828125,VWEQWEQWEAAAAAAAAAAAAAAAAAAAAAAAA,963FXN1S,963FXN1SAAAAAAAAAAA,1970-01-02
>  05:01:00.001,1970-01-01,,,,,,,,,
> true,false,这个AsdASD,6,6,6,6.00000001224000012456372132874093949794769287109375,541.6699999999999590727384202182292938232421875,6.00005356850000026724956114776432514190673828125,WWEQWEQWEAAAAAAAAAAAAAAAAAAAAAAAA,073FXN1S,073FXN1SAAAAAAAAAAA,1970-01-02
>  05:01:00.001,1970-01-01,,,,,,,,,
> true,false,这个AsdASD,7,7,7,7.00000001224000012456372132874093949794769287109375,542.6699999999999590727384202182292938232421875,7.00005356850000026724956114776432514190673828125,XWEQWEQWEAAAAAAAAAAAAAAAAAAAAAAAA,173FXN1S,173FXN1SAAAAAAAAAAA,1970-01-02
>  05:01:00.001,1970-01-01,,,,,,,,,
> true,false,这个AsdASD,8,8,8,8.00000001224000101274214102886617183685302734375,543.6699999999999590727384202182292938232421875,8.00005356850000026724956114776432514190673828125,YWEQWEQWEAAAAAAAAAAAAAAAAAAAAAAAA,273FXN1S,273FXN1SAAAAAAAAAAA,1970-01-02
>  05:01:00.001,1970-01-01,,,,,,,,,
> true,false,这个AsdASD,9,9,9,9.00000001224000101274214102886617183685302734375,544.6699999999999590727384202182292938232421875,9.00005356850000026724956114776432514190673828125,ZWEQWEQWEAAAAAAAAAAAAAAAAAAAAAAAA,373FXN1S,373FXN1SAAAAAAAAAAA,1970-01-02
>  05:01:00.001,1970-01-01,,,,,,,,,
> true,false,这个AsdASD,10,10,10,10.00000001224000101274214102886617183685302734375,545.6699999999999590727384202182292938232421875,10.00005356850000026724956114776432514190673828125,AXEQWEQWEAAAAAAAAAAAAAAAAAAAAAAAA,473FXN1S,473FXN1SAAAAAAAAAAA,1970-01-02
>  05:01:00.001,1970-01-01,,,,,,,,,
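> 
> For contrast, a sketch of the same two-column table with a non-boolean bucket column (hypothetical table name); assuming the defect is specific to the boolean type, the same two-column load as in the sketch above is expected to succeed here:
> create table if not exists bucket_string_min(bool_1 boolean, words string)
> stored as carbondata
> TBLPROPERTIES ('BUCKET_NUMBER'='1', 'BUCKET_COLUMNS'='words');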


