[ https://issues.apache.org/jira/browse/SPARK-6772?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=14486853#comment-14486853 ]

Aditya Parmar commented on SPARK-6772:
--------------------------------------

Please find the code below:

import java.util.ArrayList;
import java.util.List;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.Function;
import org.apache.spark.sql.api.java.DataType;
import org.apache.spark.sql.api.java.JavaSchemaRDD;
import org.apache.spark.sql.api.java.Row;
import org.apache.spark.sql.api.java.StructField;
import org.apache.spark.sql.api.java.StructType;
import org.apache.spark.sql.hive.api.java.JavaHiveContext;

public class engineshow {

    public static void main(String[] args) {

        SparkConf conf = new SparkConf().setAppName("Engine");
        JavaSparkContext sc = new JavaSparkContext(conf);
        JavaHiveContext hContext = new JavaHiveContext(sc);

        JavaSchemaRDD[] inputs = new JavaSchemaRDD[2];

        // Build a nine-column, all-string schema for the input file.
        String sch = "a b c d e f g h i"; // input file schema
        List<StructField> fields = new ArrayList<StructField>();
        for (String fieldName : sch.split(" ")) {
            fields.add(DataType.createStructField(fieldName, DataType.StringType, true));
        }
        StructType schema = DataType.createStructType(fields);

        // Parse each CSV line into a Row.
        JavaRDD<String> input = sc.textFile("/home/aditya/stocks1.csv");
        JavaRDD<Row> rowRDD = input.map(new Function<String, Row>() {
            public Row call(String record) throws Exception {
                // Note: split(",") drops trailing empty fields, so a record with
                // empty trailing columns yields a Row shorter than the nine-field
                // schema; reading such a Row is a likely source of the
                // ArrayIndexOutOfBoundsException reported below.
                String[] fields = record.split(",");
                Object[] fieldsConverted = fields; // String[] is assignable to Object[]
                return Row.create(fieldsConverted);
            }
        });

        inputs[0] = hContext.applySchema(rowRDD, schema);
        inputs[0].registerTempTable("comp1");

        // This second schema is built but never applied; the SQL result below
        // already carries its own schema.
        sch = "a b";
        fields = new ArrayList<StructField>();
        for (String fieldName : sch.split(" ")) {
            fields.add(DataType.createStructField(fieldName, DataType.StringType, true));
        }
        schema = DataType.createStructType(fields);

        inputs[1] = hContext.sql("select a, b from comp1");
        inputs[1].saveAsTextFile("/home/aditya/outputog");
    }
}
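
For reference, the most likely trigger is the split(",") call: Java's String.split discards trailing empty strings, so any line with empty trailing columns produces a Row with fewer than nine fields. Below is a minimal defensive sketch of that map function, assuming short rows should be padded with nulls; the -1 split limit and the padding loop are illustrative additions, not part of the original code.

// Hypothetical defensive variant of the row-parsing function above.
final int numColumns = 9; // must match the "a b c d e f g h i" schema
JavaRDD<Row> safeRowRDD = input.map(new Function<String, Row>() {
    public Row call(String record) throws Exception {
        // A -1 limit keeps trailing empty strings instead of dropping them.
        String[] parsed = record.split(",", -1);
        // Pad short records with nulls so every Row matches the schema width
        // (the fields were declared nullable above).
        Object[] values = new Object[numColumns];
        for (int i = 0; i < numColumns; i++) {
            values[i] = i < parsed.length ? parsed[i] : null;
        }
        return Row.create(values);
    }
});

With a fixed-width Object[] every Row matches the schema passed to applySchema, so the select no longer reads past the end of a short row.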


> spark sql error when running code on large number of records
> ------------------------------------------------------------
>
>                 Key: SPARK-6772
>                 URL: https://issues.apache.org/jira/browse/SPARK-6772
>             Project: Spark
>          Issue Type: Test
>          Components: SQL
>    Affects Versions: 1.2.0
>            Reporter: Aditya Parmar
>
> Hi all,
> I am getting an ArrayIndexOutOfBoundsException when I try to run a simple
> column-filtering query on a file with 2.5 lakh (250,000) records. It runs
> fine on a file with 2k records.
> {code}
> 15/04/08 16:54:06 INFO TaskSetManager: Starting task 0.1 in stage 0.0 (TID 3, blrwfl11189.igatecorp.com, PROCESS_LOCAL, 1350 bytes)
> 15/04/08 16:54:06 INFO TaskSetManager: Lost task 1.1 in stage 0.0 (TID 2) on executor blrwfl11189.igatecorp.com: java.lang.ArrayIndexOutOfBoundsException (null) [duplicate 1]
> 15/04/08 16:54:06 INFO TaskSetManager: Starting task 1.2 in stage 0.0 (TID 4, blrwfl11189.igatecorp.com, PROCESS_LOCAL, 1350 bytes)
> 15/04/08 16:54:06 INFO TaskSetManager: Lost task 1.2 in stage 0.0 (TID 4) on executor blrwfl11189.igatecorp.com: java.lang.ArrayIndexOutOfBoundsException (null) [duplicate 2]
> 15/04/08 16:54:06 INFO TaskSetManager: Starting task 1.3 in stage 0.0 (TID 5, blrwfl11189.igatecorp.com, PROCESS_LOCAL, 1350 bytes)
> 15/04/08 16:54:06 INFO TaskSetManager: Lost task 0.1 in stage 0.0 (TID 3) on executor blrwfl11189.igatecorp.com: java.lang.ArrayIndexOutOfBoundsException (null) [duplicate 3]
> 15/04/08 16:54:06 INFO TaskSetManager: Starting task 0.2 in stage 0.0 (TID 6, blrwfl11189.igatecorp.com, PROCESS_LOCAL, 1350 bytes)
> 15/04/08 16:54:06 INFO TaskSetManager: Lost task 1.3 in stage 0.0 (TID 5) on executor blrwfl11189.igatecorp.com: java.lang.ArrayIndexOutOfBoundsException (null) [duplicate 4]
> 15/04/08 16:54:06 ERROR TaskSetManager: Task 1 in stage 0.0 failed 4 times; aborting job
> 15/04/08 16:54:06 INFO TaskSchedulerImpl: Cancelling stage 0
> 15/04/08 16:54:06 INFO TaskSchedulerImpl: Stage 0 was cancelled
> 15/04/08 16:54:06 INFO DAGScheduler: Job 0 failed: saveAsTextFile at JavaSchemaRDD.scala:42, took 1.914477 s
> Exception in thread "main" org.apache.spark.SparkException: Job aborted due to stage failure: Task 1 in stage 0.0 failed 4 times, most recent failure: Lost task 1.3 in stage 0.0 (TID 5, blrwfl11189.igatecorp.com): java.lang.ArrayIndexOutOfBoundsException
> Driver stacktrace:
>         at org.apache.spark.scheduler.DAGScheduler.org$apache$spark$scheduler$DAGScheduler$$failJobAndIndependentStages(DAGScheduler.scala:1214)
>         at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1203)
>         at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1202)
>         at scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)
>         at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:47)
>         at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:1202)
>         at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:696)
>         at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:696)
>         at scala.Option.foreach(Option.scala:236)
>         at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:696)
>         at org.apache.spark.scheduler.DAGSchedulerEventProcessActor$$anonfun$receive$2.applyOrElse(DAGScheduler.scala:1420)
>         at akka.actor.Actor$class.aroundReceive(Actor.scala:465)
>         at org.apache.spark.scheduler.DAGSchedulerEventProcessActor.aroundReceive(DAGScheduler.scala:1375)
>         at akka.actor.ActorCell.receiveMessage(ActorCell.scala:516)
>         at akka.actor.ActorCell.invoke(ActorCell.scala:487)
>         at akka.dispatch.Mailbox.processMailbox(Mailbox.scala:238)
>         at akka.dispatch.Mailbox.run(Mailbox.scala:220)
>         at akka.dispatch.ForkJoinExecutorConfigurator$AkkaForkJoinTask.exec(AbstractDispatcher.scala:393)
>         at scala.concurrent.forkjoin.ForkJoinTask.doExec(ForkJoinTask.java:260)
>         at scala.concurrent.forkjoin.ForkJoinPool$WorkQueue.runTask(ForkJoinPool.java:1339)
>         at scala.concurrent.forkjoin.ForkJoinPool.runWorker(ForkJoinPool.java:1979)
>         at scala.concurrent.forkjoin.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:107)
> [aditya@blrwfl11189 ~]$
>  {code}


