[ https://issues.apache.org/jira/browse/HIVE-15165?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=15903966#comment-15903966 ]
Ashutosh Chauhan commented on HIVE-15165:
-----------------------------------------
This is still flaky. Latest failure:
https://builds.apache.org/job/PreCommit-HIVE-Build/4044/testReport/junit/org.apache.hadoop.hive.cli/TestSparkNegativeCliDriver/org_apache_hadoop_hive_cli_TestSparkNegativeCliDriver/
{code}
PREHOOK: query: ANALYZE TABLE src_thrift COMPUTE STATISTICS
PREHOOK: type: QUERY
PREHOOK: Input: default@src_thrift
PREHOOK: Output: default@src_thrift
Query ID = hiveptest_20170309020023_8fd3594a-955a-445f-95d1-5f1366fcd4db
Total jobs = 1
Launching Job 1 out of 1
In order to change the average load for a reducer (in bytes):
  set hive.exec.reducers.bytes.per.reducer=<number>
In order to limit the maximum number of reducers:
  set hive.exec.reducers.max=<number>
In order to set a constant number of reducers:
  set mapreduce.job.reduces=<number>
Starting Spark Job = 774d03c2-bebd-4d9b-81a9-e9d888f5e937
Job failed with java.io.IOException: Failed to create local dir in /tmp/blockmgr-9bbc7122-16f7-46a1-b86a-0ee0f1d69c16/3c.
    at org.apache.spark.storage.DiskBlockManager.getFile(DiskBlockManager.scala:70)
    at org.apache.spark.storage.DiskStore.contains(DiskStore.scala:124)
    at org.apache.spark.storage.BlockManager.org$apache$spark$storage$BlockManager$$getCurrentBlockStatus(BlockManager.scala:379)
    at org.apache.spark.storage.BlockManager$$anonfun$doPutIterator$1.apply(BlockManager.scala:959)
    at org.apache.spark.storage.BlockManager$$anonfun$doPutIterator$1.apply(BlockManager.scala:910)
    at org.apache.spark.storage.BlockManager.doPut(BlockManager.scala:866)
    at org.apache.spark.storage.BlockManager.doPutIterator(BlockManager.scala:910)
    at org.apache.spark.storage.BlockManager.putIterator(BlockManager.scala:700)
    at org.apache.spark.storage.BlockManager.putSingle(BlockManager.scala:1213)
    at org.apache.spark.broadcast.TorrentBroadcast.writeBlocks(TorrentBroadcast.scala:103)
    at org.apache.spark.broadcast.TorrentBroadcast.<init>(TorrentBroadcast.scala:86)
    at org.apache.spark.broadcast.TorrentBroadcastFactory.newBroadcast(TorrentBroadcastFactory.scala:34)
    at org.apache.spark.broadcast.BroadcastManager.newBroadcast(BroadcastManager.scala:56)
    at org.apache.spark.SparkContext.broadcast(SparkContext.scala:1370)
    at org.apache.spark.rdd.HadoopRDD.<init>(HadoopRDD.scala:125)
    at org.apache.spark.SparkContext$$anonfun$hadoopRDD$1.apply(SparkContext.scala:965)
    at org.apache.spark.SparkContext$$anonfun$hadoopRDD$1.apply(SparkContext.scala:961)
    at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
    at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:112)
    at org.apache.spark.SparkContext.withScope(SparkContext.scala:682)
    at org.apache.spark.SparkContext.hadoopRDD(SparkContext.scala:961)
    at org.apache.spark.api.java.JavaSparkContext.hadoopRDD(JavaSparkContext.scala:412)
    at org.apache.hadoop.hive.ql.exec.spark.SparkPlanGenerator.generateMapInput(SparkPlanGenerator.java:198)
    at org.apache.hadoop.hive.ql.exec.spark.SparkPlanGenerator.generateParentTran(SparkPlanGenerator.java:138)
    at org.apache.hadoop.hive.ql.exec.spark.SparkPlanGenerator.generate(SparkPlanGenerator.java:110)
    at org.apache.hadoop.hive.ql.exec.spark.RemoteHiveSparkClient$JobStatusJob.call(RemoteHiveSparkClient.java:346)
    at org.apache.hive.spark.client.RemoteDriver$JobWrapper.call(RemoteDriver.java:358)
    at org.apache.hive.spark.client.RemoteDriver$JobWrapper.call(RemoteDriver.java:323)
    at java.util.concurrent.FutureTask.run(FutureTask.java:266)
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
    at java.lang.Thread.run(Thread.java:745)
FAILED: Execution Error, return code 3 from org.apache.hadoop.hive.ql.exec.spark.SparkTask. java.io.IOException: Failed to create local dir in /tmp/blockmgr-9bbc7122-16f7-46a1-b86a-0ee0f1d69c16/3c.
    at org.apache.spark.storage.DiskBlockManager.getFile(DiskBlockManager.scala:70)
    at org.apache.spark.storage.DiskStore.contains(DiskStore.scala:124)
    at org.apache.spark.storage.BlockManager.org$apache$spark$storage$BlockManager$$getCurrentBlockStatus(BlockManager.scala:379)
    at org.apache.spark.storage.BlockManager$$anonfun$doPutIterator$1.apply(BlockManager.scala:959)
    at org.apache.spark.storage.BlockManager$$anonfun$doPutIterator$1.apply(BlockManager.scala:910)
    at org.apache.spark.storage.BlockManager.doPut(BlockManager.scala:866)
    at org.apache.spark.storage.BlockManager.doPutIterator(BlockManager.scala:910)
    at org.apache.spark.storage.BlockManager.putIterator(BlockManager.scala:700)
    at org.apache.spark.storage.BlockManager.putSingle(BlockManager.scala:1213)
    at org.apache.spark.broadcast.TorrentBroadcast.writeBlocks(TorrentBroadcast.scala:103)
    at org.apache.spark.broadcast.TorrentBroadcast.<init>(TorrentBroadcast.scala:86)
    at org.apache.spark.broadcast.TorrentBroadcastFactory.newBroadcast(TorrentBroadcastFactory.scala:34)
    at org.apache.spark.broadcast.BroadcastManager.newBroadcast(BroadcastManager.scala:56)
    at org.apache.spark.SparkContext.broadcast(SparkContext.scala:1370)
    at org.apache.spark.rdd.HadoopRDD.<init>(HadoopRDD.scala:125)
    at org.apache.spark.SparkContext$$anonfun$hadoopRDD$1.apply(SparkContext.scala:965)
    at org.apache.spark.SparkContext$$anonfun$hadoopRDD$1.apply(SparkContext.scala:961)
    at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
    at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:112)
    at org.apache.spark.SparkContext.withScope(SparkContext.scala:682)
    at org.apache.spark.SparkContext.hadoopRDD(SparkContext.scala:961)
    at org.apache.spark.api.java.JavaSparkContext.hadoopRDD(JavaSparkContext.scala:412)
    at org.apache.hadoop.hive.ql.exec.spark.SparkPlanGenerator.generateMapInput(SparkPlanGenerator.java:198)
    at org.apache.hadoop.hive.ql.exec.spark.SparkPlanGenerator.generateParentTran(SparkPlanGenerator.java:138)
    at org.apache.hadoop.hive.ql.exec.spark.SparkPlanGenerator.generate(SparkPlanGenerator.java:110)
    at org.apache.hadoop.hive.ql.exec.spark.RemoteHiveSparkClient$JobStatusJob.call(RemoteHiveSparkClient.java:346)
    at org.apache.hive.spark.client.RemoteDriver$JobWrapper.call(RemoteDriver.java:358)
    at org.apache.hive.spark.client.RemoteDriver$JobWrapper.call(RemoteDriver.java:323)
    at java.util.concurrent.FutureTask.run(FutureTask.java:266)
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
{code}
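For context on where this blows up: the IOException comes out of Spark's DiskBlockManager, which maps each block file into one of 64 hex-named subdirectories (the {{3c}} in the path above) under a per-application {{blockmgr-<uuid>}} root in {{spark.local.dir}} (which defaults to {{java.io.tmpdir}}, i.e. {{/tmp}}) and creates each subdirectory lazily on first use. The sketch below is a simplified Java reconstruction of that lookup for illustration only, not Spark's actual code; it shows the one spot that can produce "Failed to create local dir": {{mkdirs()}} returning false, which is what happens if something (e.g. a periodic /tmp cleaner on the build slave) deletes the blockmgr root while the job is still running.
{code:java}
// Simplified sketch of the block-file lookup, for illustration only
// (not Spark's actual DiskBlockManager code).
import java.io.File;
import java.io.IOException;

public class DiskBlockManagerSketch {
  // Default value of spark.diskStore.subDirectories.
  private static final int SUB_DIRS_PER_LOCAL_DIR = 64;

  private final File[] localDirs;   // e.g. /tmp/blockmgr-<uuid>
  private final File[][] subDirs;   // filled in lazily below

  public DiskBlockManagerSketch(File[] localDirs) {
    this.localDirs = localDirs;
    this.subDirs = new File[localDirs.length][SUB_DIRS_PER_LOCAL_DIR];
  }

  public File getFile(String filename) throws IOException {
    int hash = nonNegativeHash(filename);
    int dirId = hash % localDirs.length;
    int subDirId = (hash / localDirs.length) % SUB_DIRS_PER_LOCAL_DIR;

    synchronized (subDirs[dirId]) {
      File subDir = subDirs[dirId][subDirId];
      if (subDir == null) {
        // Subdirectory names are two hex digits, e.g. the "3c" in the log above.
        File newDir = new File(localDirs[dirId], String.format("%02x", subDirId));
        if (!newDir.exists() && !newDir.mkdirs()) {
          // The failure seen in this test: the parent blockmgr-* directory
          // vanished out from under us (e.g. /tmp cleanup), so mkdirs() fails.
          throw new IOException("Failed to create local dir in " + newDir + ".");
        }
        subDirs[dirId][subDirId] = newDir;
        subDir = newDir;
      }
      return new File(subDir, filename);
    }
  }

  private static int nonNegativeHash(String s) {
    int h = s.hashCode();
    return h == Integer.MIN_VALUE ? 0 : Math.abs(h);
  }
}
{code}
If that is indeed the trigger, pointing {{spark.local.dir}} for the ptest Spark cluster at a directory inside the test's own scratch space rather than {{/tmp}} should take tmp-cleaner races out of the picture; that is a guess on my part, not something I have verified.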
cc: [~spena] [~xuefuz]
> Flaky test: TestSparkNegativeCliDriver
> --------------------------------------
>
> Key: HIVE-15165
> URL: https://issues.apache.org/jira/browse/HIVE-15165
> Project: Hive
> Issue Type: Sub-task
> Reporter: Siddharth Seth
>