[ https://issues.apache.org/jira/browse/HIVE-16780?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ]

liyunzhang_intel reassigned HIVE-16780:
---------------------------------------


> Case "multiple sources, single key" in spark_dynamic_pruning.q fails 
> ---------------------------------------------------------------------
>
>                 Key: HIVE-16780
>                 URL: https://issues.apache.org/jira/browse/HIVE-16780
>             Project: Hive
>          Issue Type: Bug
>            Reporter: liyunzhang_intel
>            Assignee: liyunzhang_intel
>
> script.q
> {code}
> set hive.optimize.ppd=true;
> set hive.ppd.remove.duplicatefilters=true;
> set hive.spark.dynamic.partition.pruning=true;
> set hive.optimize.metadataonly=false;
> set hive.optimize.index.filter=true;
> set hive.strict.checks.cartesian.product=false;
> -- multiple sources, single key
> select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr);
> {code}
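> The query joins the partitioned table srcpart to srcpart_date on the partition column ds and to srcpart_hour on the partition column hr, so dynamic partition pruning has two sources (one join key each) feeding the srcpart scan. As a minimal sketch, assuming a HiveServer2 endpoint (the JDBC URL and the explicit engine setting are not from this report), the repro can also be driven over JDBC:
> {code}
> // Hypothetical harness (not from the report): applies script.q's session
> // settings and runs the failing query over HiveServer2 JDBC. The URL,
> // database, and hive.execution.engine=spark are assumptions.
> import java.sql.Connection;
> import java.sql.DriverManager;
> import java.sql.Statement;
>
> public class Hive16780Repro {
>   public static void main(String[] args) throws Exception {
>     Class.forName("org.apache.hive.jdbc.HiveDriver");
>     try (Connection conn = DriverManager.getConnection(
>              "jdbc:hive2://localhost:10000/default");
>          Statement stmt = conn.createStatement()) {
>       // Same session settings as script.q above.
>       stmt.execute("set hive.optimize.ppd=true");
>       stmt.execute("set hive.ppd.remove.duplicatefilters=true");
>       stmt.execute("set hive.spark.dynamic.partition.pruning=true");
>       stmt.execute("set hive.optimize.metadataonly=false");
>       stmt.execute("set hive.optimize.index.filter=true");
>       stmt.execute("set hive.strict.checks.cartesian.product=false");
>       stmt.execute("set hive.execution.engine=spark"); // assumption
>       // Multiple sources, single key: fails with the exception below.
>       stmt.executeQuery("select count(*) from srcpart"
>           + " join srcpart_date on (srcpart.ds = srcpart_date.ds)"
>           + " join srcpart_hour on (srcpart.hr = srcpart_hour.hr)");
>     }
>   }
> }
> {code}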
> Exception:
> {code}
> job failed with java.io.FileNotFoundException: File hdfs://bdpe41:8020/tmp/hive/root/de80d82a-b910-4b87-940c-6be3ea37ba25/hive_2017-05-27_14-55-30_114_8497388836256415979-1/-mr-10004/1/5 does not exist.
> FAILED: Execution Error, return code 3 from org.apache.hadoop.hive.ql.exec.spark.SparkTask. java.lang.RuntimeException: org.apache.hadoop.hive.ql.metadata.HiveException: java.io.FileNotFoundException: File hdfs://bdpe41:8020/tmp/hive/root/de80d82a-b910-4b87-940c-6be3ea37ba25/hive_2017-05-27_14-55-30_114_8497388836256415979-1/-mr-10004/1/5 does not exist.
>     at org.apache.hadoop.hive.ql.io.HiveInputFormat.init(HiveInputFormat.java:404)
>     at org.apache.hadoop.hive.ql.io.CombineHiveInputFormat.getSplits(CombineHiveInputFormat.java:498)
>     at org.apache.spark.rdd.HadoopRDD.getPartitions(HadoopRDD.scala:200)
>     at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:248)
>     at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:246)
>     at scala.Option.getOrElse(Option.scala:121)
>     at org.apache.spark.rdd.RDD.partitions(RDD.scala:246)
>     at org.apache.spark.rdd.MapPartitionsRDD.getPartitions(MapPartitionsRDD.scala:35)
>     at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:248)
>     at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:246)
>     at scala.Option.getOrElse(Option.scala:121)
>     at org.apache.spark.rdd.RDD.partitions(RDD.scala:246)
>     at org.apache.spark.ShuffleDependency.<init>(Dependency.scala:91)
>     at org.apache.spark.rdd.ShuffledRDD.getDependencies(ShuffledRDD.scala:91)
>     at org.apache.spark.rdd.RDD$$anonfun$dependencies$2.apply(RDD.scala:235)
>     at org.apache.spark.rdd.RDD$$anonfun$dependencies$2.apply(RDD.scala:233)
>     at scala.Option.getOrElse(Option.scala:121)
>     at org.apache.spark.rdd.RDD.dependencies(RDD.scala:233)
>     at org.apache.hadoop.hive.ql.exec.spark.SparkUtilities.rddToString(SparkUtilities.java:144)
>     at org.apache.hadoop.hive.ql.exec.spark.SparkUtilities.rddToString(SparkUtilities.java:149)
>     at org.apache.hadoop.hive.ql.exec.spark.SparkUtilities.rddToString(SparkUtilities.java:149)
>     at org.apache.hadoop.hive.ql.exec.spark.SparkUtilities.rddToString(SparkUtilities.java:149)
>     at org.apache.hadoop.hive.ql.exec.spark.SparkUtilities.rddGraphToString(SparkUtilities.java:134)
>     at org.apache.hadoop.hive.ql.exec.spark.SparkPlan.generateGraph(SparkPlan.java:93)
>     at org.apache.hadoop.hive.ql.exec.spark.RemoteHiveSparkClient$JobStatusJob.call(RemoteHiveSparkClient.java:349)
>     at org.apache.hive.spark.client.RemoteDriver$JobWrapper.call(RemoteDriver.java:358)
>     at org.apache.hive.spark.client.RemoteDriver$JobWrapper.call(RemoteDriver.java:323)
>     at java.util.concurrent.FutureTask.run(FutureTask.java:266)
>     at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
>     at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
>     at java.lang.Thread.run(Thread.java:745)
> Caused by: org.apache.hadoop.hive.ql.metadata.HiveException: java.io.FileNotFoundException: File hdfs://bdpe41:8020/tmp/hive/root/de80d82a-b910-4b87-940c-6be3ea37ba25/hive_2017-05-27_14-55-30_114_8497388836256415979-1/-mr-10004/1/5 does not exist.
>     at org.apache.hadoop.hive.ql.exec.spark.SparkDynamicPartitionPruner.processFiles(SparkDynamicPartitionPruner.java:147)
>     at org.apache.hadoop.hive.ql.exec.spark.SparkDynamicPartitionPruner.prune(SparkDynamicPartitionPruner.java:76)
>     at org.apache.hadoop.hive.ql.io.HiveInputFormat.init(HiveInputFormat.java:402)
>     ... 30 more
> Caused by: java.io.FileNotFoundException: File hdfs://bdpe41:8020/tmp/hive/root/de80d82a-b910-4b87-940c-6be3ea37ba25/hive_2017-05-27_14-55-30_114_8497388836256415979-1/-mr-10004/1/5 does not exist.
>     at org.apache.hadoop.hdfs.DistributedFileSystem.listStatusInternal(DistributedFileSystem.java:795)
>     at org.apache.hadoop.hdfs.DistributedFileSystem.access$700(DistributedFileSystem.java:106)
>     at org.apache.hadoop.hdfs.DistributedFileSystem$18.doCall(DistributedFileSystem.java:853)
>     at org.apache.hadoop.hdfs.DistributedFileSystem$18.doCall(DistributedFileSystem.java:849)
>     at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81)
>     at org.apache.hadoop.hdfs.DistributedFileSystem.listStatus(DistributedFileSystem.java:860)
>     at org.apache.hadoop.hive.ql.exec.spark.SparkDynamicPartitionPruner.processFiles(SparkDynamicPartitionPruner.java:119)
>     ... 32 more
> {code}
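> Per the Caused-by frames, the failure is raised while HiveInputFormat.init invokes SparkDynamicPartitionPruner.prune: processFiles lists the expected pruning-output directory (.../-mr-10004/1/5) via DistributedFileSystem.listStatus, and that directory apparently was never written. A minimal sketch of the failing pattern, assuming only the public Hadoop FileSystem API (the class name and path handling are illustrative, not Hive's actual code):
> {code}
> import org.apache.hadoop.conf.Configuration;
> import org.apache.hadoop.fs.FileStatus;
> import org.apache.hadoop.fs.FileSystem;
> import org.apache.hadoop.fs.Path;
>
> // Sketch of the listing that fails above: listStatus on a directory that
> // no pruning sink produced throws FileNotFoundException.
> public class PruningDirCheck {
>   public static void main(String[] args) throws Exception {
>     // e.g. hdfs://.../-mr-10004/1/5 from the trace, passed as an argument
>     Path pruningDir = new Path(args[0]);
>     FileSystem fs = pruningDir.getFileSystem(new Configuration());
>     if (!fs.exists(pruningDir)) {
>       // The state that surfaces as the job failure in this issue.
>       System.err.println("pruning output missing: " + pruningDir);
>       return;
>     }
>     for (FileStatus status : fs.listStatus(pruningDir)) {
>       System.out.println(status.getPath()); // files the pruner would read
>     }
>   }
> }
> {code}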


