See <https://builds.apache.org/job/Mahout-Quality/3377/changes>
Changes:
[smarthi] Rolling back Mahout 0.12.2 Release candidate, thanks github connectivity
------------------------------------------
[...truncated 55968 lines...]
    at scala.concurrent.impl.CallbackRunnable.run(Promise.scala:32)
    at scala.concurrent.impl.ExecutionContextImpl$$anon$3.exec(ExecutionContextImpl.scala:107)
    at scala.concurrent.forkjoin.ForkJoinTask.doExec(ForkJoinTask.java:260)
    at scala.concurrent.forkjoin.ForkJoinPool$WorkQueue.runTask(ForkJoinPool.java:1339)
    at scala.concurrent.forkjoin.ForkJoinPool.runWorker(ForkJoinPool.java:1979)
    at scala.concurrent.forkjoin.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:107)
06/12/2016 23:10:37 DataSink (org.apache.flink.api.java.Utils$CollectHelper@4846fae7)(13/16) switched to RUNNING
06/12/2016 23:10:37 DataSink (org.apache.flink.api.java.Utils$CollectHelper@4846fae7)(12/16) switched to RUNNING
06/12/2016 23:10:37 Job execution switched to status FAILING.
java.lang.Exception: Failed to deploy the task to slot SimpleSlot (0)(1) - df67b19955b334462482d095752b6ddb @ localhost - 16 slots - URL: akka://flink/user/taskmanager_1 - ALLOCATED/ALIVE: Response was not of type Acknowledge
    at org.apache.flink.runtime.executiongraph.Execution$2.onComplete(Execution.java:395)
    at akka.dispatch.OnComplete.internal(Future.scala:247)
    at akka.dispatch.OnComplete.internal(Future.scala:244)
    at akka.dispatch.japi$CallbackBridge.apply(Future.scala:174)
    at akka.dispatch.japi$CallbackBridge.apply(Future.scala:171)
    at scala.concurrent.impl.CallbackRunnable.run(Promise.scala:32)
    at scala.concurrent.impl.ExecutionContextImpl$$anon$3.exec(ExecutionContextImpl.scala:107)
    at scala.concurrent.forkjoin.ForkJoinTask.doExec(ForkJoinTask.java:260)
    at scala.concurrent.forkjoin.ForkJoinPool$WorkQueue.runTask(ForkJoinPool.java:1339)
    at scala.concurrent.forkjoin.ForkJoinPool.runWorker(ForkJoinPool.java:1979)
    at scala.concurrent.forkjoin.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:107)
06/12/2016 23:10:37 CHAIN DataSource (at org.apache.flink.api.scala.ExecutionEnvironment.createInput(ExecutionEnvironment.scala:396) (org.apache.flink.api.scala.hadoop.mapred.HadoopInputFo) -> Map (Map at org.apache.mahout.flinkbindings.FlinkEngine$.drmDfsRead(FlinkEngine.scala:75))(5/16) switched to CANCELING
06/12/2016 23:10:37 CHAIN DataSource (at org.apache.flink.api.scala.ExecutionEnvironment.createInput(ExecutionEnvironment.scala:396) (org.apache.flink.api.scala.hadoop.mapred.HadoopInputFo) -> Map (Map at org.apache.mahout.flinkbindings.FlinkEngine$.drmDfsRead(FlinkEngine.scala:75))(6/16) switched to CANCELING
06/12/2016 23:10:37 DataSink (org.apache.flink.api.java.Utils$CollectHelper@4846fae7)(1/16) switched to CANCELING
06/12/2016 23:10:37 DataSink (org.apache.flink.api.java.Utils$CollectHelper@4846fae7)(2/16) switched to CANCELING
06/12/2016 23:10:37 DataSink (org.apache.flink.api.java.Utils$CollectHelper@4846fae7)(3/16) switched to CANCELING
06/12/2016 23:10:37 DataSink (org.apache.flink.api.java.Utils$CollectHelper@4846fae7)(4/16) switched to CANCELING
06/12/2016 23:10:37 DataSink (org.apache.flink.api.java.Utils$CollectHelper@4846fae7)(5/16) switched to CANCELING
06/12/2016 23:10:37 DataSink (org.apache.flink.api.java.Utils$CollectHelper@4846fae7)(6/16) switched to CANCELED
06/12/2016 23:10:37 DataSink (org.apache.flink.api.java.Utils$CollectHelper@4846fae7)(7/16) switched to CANCELING
06/12/2016 23:10:37 DataSink (org.apache.flink.api.java.Utils$CollectHelper@4846fae7)(8/16) switched to CANCELING
06/12/2016 23:10:37 DataSink (org.apache.flink.api.java.Utils$CollectHelper@4846fae7)(9/16) switched to CANCELING
06/12/2016 23:10:37 DataSink (org.apache.flink.api.java.Utils$CollectHelper@4846fae7)(10/16) switched to CANCELING
06/12/2016 23:10:37 DataSink (org.apache.flink.api.java.Utils$CollectHelper@4846fae7)(11/16) switched to CANCELING
06/12/2016 23:10:37 DataSink (org.apache.flink.api.java.Utils$CollectHelper@4846fae7)(11/16) switched to RUNNING
06/12/2016 23:10:37 DataSink (org.apache.flink.api.java.Utils$CollectHelper@4846fae7)(12/16) switched to CANCELING
06/12/2016 23:10:37 DataSink (org.apache.flink.api.java.Utils$CollectHelper@4846fae7)(13/16) switched to CANCELING
06/12/2016 23:10:37 DataSink (org.apache.flink.api.java.Utils$CollectHelper@4846fae7)(14/16) switched to CANCELING
06/12/2016 23:10:37 DataSink (org.apache.flink.api.java.Utils$CollectHelper@4846fae7)(15/16) switched to CANCELING
06/12/2016 23:10:37 DataSink (org.apache.flink.api.java.Utils$CollectHelper@4846fae7)(10/16) switched to CANCELED
06/12/2016 23:10:37 DataSink (org.apache.flink.api.java.Utils$CollectHelper@4846fae7)(7/16) switched to CANCELED
06/12/2016 23:10:37 DataSink (org.apache.flink.api.java.Utils$CollectHelper@4846fae7)(9/16) switched to CANCELED
06/12/2016 23:10:37 DataSink (org.apache.flink.api.java.Utils$CollectHelper@4846fae7)(12/16) switched to CANCELED
06/12/2016 23:10:37 CHAIN DataSource (at org.apache.flink.api.scala.ExecutionEnvironment.createInput(ExecutionEnvironment.scala:396) (org.apache.flink.api.scala.hadoop.mapred.HadoopInputFo) -> Map (Map at org.apache.mahout.flinkbindings.FlinkEngine$.drmDfsRead(FlinkEngine.scala:75))(5/16) switched to CANCELED
06/12/2016 23:10:37 DataSink (org.apache.flink.api.java.Utils$CollectHelper@4846fae7)(13/16) switched to CANCELED
06/12/2016 23:10:37 DataSink (org.apache.flink.api.java.Utils$CollectHelper@4846fae7)(8/16) switched to CANCELED
06/12/2016 23:10:37 DataSink (org.apache.flink.api.java.Utils$CollectHelper@4846fae7)(1/16) switched to CANCELED
06/12/2016 23:10:37 DataSink (org.apache.flink.api.java.Utils$CollectHelper@4846fae7)(2/16) switched to CANCELED
06/12/2016 23:10:37 DataSink (org.apache.flink.api.java.Utils$CollectHelper@4846fae7)(4/16) switched to CANCELED
06/12/2016 23:10:37 DataSink (org.apache.flink.api.java.Utils$CollectHelper@4846fae7)(3/16) switched to CANCELED
06/12/2016 23:10:37 DataSink (org.apache.flink.api.java.Utils$CollectHelper@4846fae7)(15/16) switched to CANCELED
06/12/2016 23:10:37 DataSink (org.apache.flink.api.java.Utils$CollectHelper@4846fae7)(5/16) switched to CANCELED
06/12/2016 23:10:37 DataSink (org.apache.flink.api.java.Utils$CollectHelper@4846fae7)(11/16) switched to CANCELED
06/12/2016 23:10:37 DataSink (org.apache.flink.api.java.Utils$CollectHelper@4846fae7)(14/16) switched to CANCELED
06/12/2016 23:10:37 CHAIN DataSource (at org.apache.flink.api.scala.ExecutionEnvironment.createInput(ExecutionEnvironment.scala:396) (org.apache.flink.api.scala.hadoop.mapred.HadoopInputFo) -> Map (Map at org.apache.mahout.flinkbindings.FlinkEngine$.drmDfsRead(FlinkEngine.scala:75))(6/16) switched to CANCELED
06/12/2016 23:10:37 Job execution switched to status FAILED.
- Model DFS Serialization *** FAILED ***
  org.apache.flink.runtime.client.JobExecutionException: Job execution failed.
  at org.apache.flink.runtime.jobmanager.JobManager$$anonfun$handleMessage$1$$anonfun$applyOrElse$7.apply$mcV$sp(JobManager.scala:717)
  at org.apache.flink.runtime.jobmanager.JobManager$$anonfun$handleMessage$1$$anonfun$applyOrElse$7.apply(JobManager.scala:663)
  at org.apache.flink.runtime.jobmanager.JobManager$$anonfun$handleMessage$1$$anonfun$applyOrElse$7.apply(JobManager.scala:663)
  at scala.concurrent.impl.Future$PromiseCompletingRunnable.liftedTree1$1(Future.scala:24)
  at scala.concurrent.impl.Future$PromiseCompletingRunnable.run(Future.scala:24)
  at akka.dispatch.TaskInvocation.run(AbstractDispatcher.scala:41)
  at akka.dispatch.ForkJoinExecutorConfigurator$AkkaForkJoinTask.exec(AbstractDispatcher.scala:401)
  at scala.concurrent.forkjoin.ForkJoinTask.doExec(ForkJoinTask.java:260)
  at scala.concurrent.forkjoin.ForkJoinPool$WorkQueue.runTask(ForkJoinPool.java:1339)
  at scala.concurrent.forkjoin.ForkJoinPool.runWorker(ForkJoinPool.java:1979)
  ...
  Cause: java.lang.Exception: Failed to deploy the task to slot SimpleSlot (0)(1) - df67b19955b334462482d095752b6ddb @ localhost - 16 slots - URL: akka://flink/user/taskmanager_1 - ALLOCATED/ALIVE: Response was not of type Acknowledge
  at org.apache.flink.runtime.executiongraph.Execution$2.onComplete(Execution.java:395)
  at akka.dispatch.OnComplete.internal(Future.scala:247)
  at akka.dispatch.OnComplete.internal(Future.scala:244)
  at akka.dispatch.japi$CallbackBridge.apply(Future.scala:174)
  at akka.dispatch.japi$CallbackBridge.apply(Future.scala:171)
  at scala.concurrent.impl.CallbackRunnable.run(Promise.scala:32)
  at scala.concurrent.impl.ExecutionContextImpl$$anon$3.exec(ExecutionContextImpl.scala:107)
  at scala.concurrent.forkjoin.ForkJoinTask.doExec(ForkJoinTask.java:260)
  at scala.concurrent.forkjoin.ForkJoinPool$WorkQueue.runTask(ForkJoinPool.java:1339)
  at scala.concurrent.forkjoin.ForkJoinPool.runWorker(ForkJoinPool.java:1979)
  ...
06/12/2016 23:10:38 Job execution switched to status RUNNING.
06/12/2016 23:10:38 DataSource (at org.apache.mahout.flinkbindings.FlinkEngine$.parallelize(FlinkEngine.scala:273) (org.apache.flink.api.java.io.CollectionInputFormat))(1/1) switched to SCHEDULED
06/12/2016 23:10:38 DataSource (at org.apache.mahout.flinkbindings.FlinkEngine$.parallelize(FlinkEngine.scala:273) (org.apache.flink.api.java.io.CollectionInputFormat))(1/1) switched to DEPLOYING
06/12/2016 23:10:38 DataSource (at org.apache.mahout.flinkbindings.FlinkEngine$.parallelize(FlinkEngine.scala:273) (org.apache.flink.api.java.io.CollectionInputFormat))(1/1) switched to RUNNING
06/12/2016 23:10:38 RangePartition: LocalSample(1/1) switched to SCHEDULED
06/12/2016 23:10:38 DataSource (at org.apache.mahout.flinkbindings.FlinkEngine$.parallelize(FlinkEngine.scala:273) (org.apache.flink.api.java.io.CollectionInputFormat))(1/1) switched to FINISHED
06/12/2016 23:10:38 RangePartition: LocalSample(1/1) switched to DEPLOYING
06/12/2016 23:10:38 RangePartition: PreparePartition(1/1) switched to SCHEDULED
06/12/2016 23:10:38 RangePartition: PreparePartition(1/1) switched to DEPLOYING
06/12/2016 23:10:38 RangePartition: LocalSample(1/1) switched to RUNNING
06/12/2016 23:10:38 RangePartition: PreparePartition(1/1) switched to RUNNING
06/12/2016 23:10:38 RangePartition: GlobalSample(1/1) switched to SCHEDULED
06/12/2016 23:10:38 RangePartition: GlobalSample(1/1) switched to DEPLOYING
06/12/2016 23:10:38 RangePartition: LocalSample(1/1) switched to FINISHED
06/12/2016 23:10:38 RangePartition: GlobalSample(1/1) switched to RUNNING
06/12/2016 23:10:38 RangePartition: Histogram(1/1) switched to SCHEDULED
06/12/2016 23:10:38 RangePartition: Histogram(1/1) switched to DEPLOYING
06/12/2016 23:10:38 RangePartition: GlobalSample(1/1) switched to FINISHED
06/12/2016 23:10:38 RangePartition: Histogram(1/1) switched to RUNNING
06/12/2016 23:10:38 RangePartition: Histogram(1/1) switched to FINISHED
06/12/2016 23:10:38 CHAIN RangePartition: Partition -> Partition -> Map (Map at org.apache.mahout.flinkbindings.FlinkEngine$.colSums(FlinkEngine.scala:192)) -> Combine (Reduce at org.apache.mahout.flinkbindings.FlinkEngine$.colSums(FlinkEngine.scala:194))(1/16) switched to SCHEDULED
06/12/2016 23:10:38 CHAIN RangePartition: Partition -> Partition -> Map (Map at org.apache.mahout.flinkbindings.FlinkEngine$.colSums(FlinkEngine.scala:192)) -> Combine (Reduce at org.apache.mahout.flinkbindings.FlinkEngine$.colSums(FlinkEngine.scala:194))(2/16) switched to SCHEDULED
06/12/2016 23:10:38 CHAIN RangePartition: Partition -> Partition -> Map (Map at org.apache.mahout.flinkbindings.FlinkEngine$.colSums(FlinkEngine.scala:192)) -> Combine (Reduce at org.apache.mahout.flinkbindings.FlinkEngine$.colSums(FlinkEngine.scala:194))(3/16) switched to SCHEDULED
06/12/2016 23:10:38 CHAIN RangePartition: Partition -> Partition -> Map (Map at org.apache.mahout.flinkbindings.FlinkEngine$.colSums(FlinkEngine.scala:192)) -> Combine (Reduce at org.apache.mahout.flinkbindings.FlinkEngine$.colSums(FlinkEngine.scala:194))(4/16) switched to SCHEDULED
06/12/2016 23:10:38 CHAIN RangePartition: Partition -> Partition -> Map (Map at org.apache.mahout.flinkbindings.FlinkEngine$.colSums(FlinkEngine.scala:192)) -> Combine (Reduce at org.apache.mahout.flinkbindings.FlinkEngine$.colSums(FlinkEngine.scala:194))(5/16) switched to SCHEDULED
06/12/2016 23:10:38 CHAIN RangePartition: Partition -> Partition -> Map (Map at org.apache.mahout.flinkbindings.FlinkEngine$.colSums(FlinkEngine.scala:192)) -> Combine (Reduce at org.apache.mahout.flinkbindings.FlinkEngine$.colSums(FlinkEngine.scala:194))(1/16) switched to DEPLOYING
06/12/2016 23:10:38 CHAIN RangePartition: Partition -> Partition -> Map (Map at org.apache.mahout.flinkbindings.FlinkEngine$.colSums(FlinkEngine.scala:192)) -> Combine (Reduce at org.apache.mahout.flinkbindings.FlinkEngine$.colSums(FlinkEngine.scala:194))(6/16) switched to SCHEDULED
06/12/2016 23:10:38 CHAIN RangePartition: Partition -> Partition -> Map (Map at org.apache.mahout.flinkbindings.FlinkEngine$.colSums(FlinkEngine.scala:192)) -> Combine (Reduce at org.apache.mahout.flinkbindings.FlinkEngine$.colSums(FlinkEngine.scala:194))(7/16) switched to SCHEDULED
06/12/2016 23:10:38 CHAIN RangePartition: Partition -> Partition -> Map (Map at org.apache.mahout.flinkbindings.FlinkEngine$.colSums(FlinkEngine.scala:192)) -> Combine (Reduce at org.apache.mahout.flinkbindings.FlinkEngine$.colSums(FlinkEngine.scala:194))(4/16) switched to DEPLOYING
06/12/2016 23:10:38 CHAIN RangePartition: Partition -> Partition -> Map (Map at org.apache.mahout.flinkbindings.FlinkEngine$.colSums(FlinkEngine.scala:192)) -> Combine (Reduce at org.apache.mahout.flinkbindings.FlinkEngine$.colSums(FlinkEngine.scala:194))(8/16) switched to SCHEDULED
06/12/2016 23:10:38 CHAIN RangePartition: Partition -> Partition -> Map (Map at org.apache.mahout.flinkbindings.FlinkEngine$.colSums(FlinkEngine.scala:192)) -> Combine (Reduce at org.apache.mahout.flinkbindings.FlinkEngine$.colSums(FlinkEngine.scala:194))(5/16) switched to DEPLOYING
06/12/2016 23:10:38 RangePartition: PreparePartition(1/1) switched to FINISHED
06/12/2016 23:10:38 CHAIN RangePartition: Partition -> Partition -> Map (Map at org.apache.mahout.flinkbindings.FlinkEngine$.colSums(FlinkEngine.scala:192)) -> Combine (Reduce at org.apache.mahout.flinkbindings.FlinkEngine$.colSums(FlinkEngine.scala:194))(9/16) switched to SCHEDULED
06/12/2016 23:10:38 CHAIN RangePartition: Partition -> Partition -> Map (Map at org.apache.mahout.flinkbindings.FlinkEngine$.colSums(FlinkEngine.scala:192)) -> Combine (Reduce at org.apache.mahout.flinkbindings.FlinkEngine$.colSums(FlinkEngine.scala:194))(8/16) switched to DEPLOYING
06/12/2016 23:10:38 CHAIN RangePartition: Partition -> Partition -> Map (Map at org.apache.mahout.flinkbindings.FlinkEngine$.colSums(FlinkEngine.scala:192)) -> Combine (Reduce at org.apache.mahout.flinkbindings.FlinkEngine$.colSums(FlinkEngine.scala:194))(7/16) switched to DEPLOYING
06/12/2016 23:10:38 CHAIN RangePartition: Partition -> Partition -> Map (Map at org.apache.mahout.flinkbindings.FlinkEngine$.colSums(FlinkEngine.scala:192)) -> Combine (Reduce at org.apache.mahout.flinkbindings.FlinkEngine$.colSums(FlinkEngine.scala:194))(12/16) switched to SCHEDULED
06/12/2016 23:10:38 CHAIN RangePartition: Partition -> Partition -> Map (Map at org.apache.mahout.flinkbindings.FlinkEngine$.colSums(FlinkEngine.scala:192)) -> Combine (Reduce at org.apache.mahout.flinkbindings.FlinkEngine$.colSums(FlinkEngine.scala:194))(14/16) switched to SCHEDULED
06/12/2016 23:10:38 CHAIN RangePartition: Partition -> Partition -> Map (Map at org.apache.mahout.flinkbindings.FlinkEngine$.colSums(FlinkEngine.scala:192)) -> Combine (Reduce at org.apache.mahout.flinkbindings.FlinkEngine$.colSums(FlinkEngine.scala:194))(16/16) switched to SCHEDULED
06/12/2016 23:10:38 CHAIN RangePartition: Partition -> Partition -> Map (Map at org.apache.mahout.flinkbindings.FlinkEngine$.colSums(FlinkEngine.scala:192)) -> Combine (Reduce at org.apache.mahout.flinkbindings.FlinkEngine$.colSums(FlinkEngine.scala:194))(15/16) switched to SCHEDULED
06/12/2016 23:10:38 CHAIN RangePartition: Partition -> Partition -> Map (Map at org.apache.mahout.flinkbindings.FlinkEngine$.colSums(FlinkEngine.scala:192)) -> Combine (Reduce at org.apache.mahout.flinkbindings.FlinkEngine$.colSums(FlinkEngine.scala:194))(11/16) switched to SCHEDULED
06/12/2016 23:10:38 CHAIN RangePartition: Partition -> Partition -> Map (Map at org.apache.mahout.flinkbindings.FlinkEngine$.colSums(FlinkEngine.scala:192)) -> Combine (Reduce at org.apache.mahout.flinkbindings.FlinkEngine$.colSums(FlinkEngine.scala:194))(2/16) switched to DEPLOYING
06/12/2016 23:10:38 CHAIN RangePartition: Partition -> Partition -> Map (Map at org.apache.mahout.flinkbindings.FlinkEngine$.colSums(FlinkEngine.scala:192)) -> Combine (Reduce at org.apache.mahout.flinkbindings.FlinkEngine$.colSums(FlinkEngine.scala:194))(14/16) switched to DEPLOYING
06/12/2016 23:10:38 CHAIN RangePartition: Partition -> Partition -> Map (Map at org.apache.mahout.flinkbindings.FlinkEngine$.colSums(FlinkEngine.scala:192)) -> Combine (Reduce at org.apache.mahout.flinkbindings.FlinkEngine$.colSums(FlinkEngine.scala:194))(13/16) switched to SCHEDULED
06/12/2016 23:10:38 CHAIN RangePartition: Partition -> Partition -> Map (Map at org.apache.mahout.flinkbindings.FlinkEngine$.colSums(FlinkEngine.scala:192)) -> Combine (Reduce at org.apache.mahout.flinkbindings.FlinkEngine$.colSums(FlinkEngine.scala:194))(11/16) switched to DEPLOYING
06/12/2016 23:10:38 CHAIN RangePartition: Partition -> Partition -> Map (Map at org.apache.mahout.flinkbindings.FlinkEngine$.colSums(FlinkEngine.scala:192)) -> Combine (Reduce at org.apache.mahout.flinkbindings.FlinkEngine$.colSums(FlinkEngine.scala:194))(10/16) switched to SCHEDULED
06/12/2016 23:10:38 CHAIN RangePartition: Partition -> Partition -> Map (Map at org.apache.mahout.flinkbindings.FlinkEngine$.colSums(FlinkEngine.scala:192)) -> Combine (Reduce at org.apache.mahout.flinkbindings.FlinkEngine$.colSums(FlinkEngine.scala:194))(12/16) switched to DEPLOYING
06/12/2016 23:10:38 CHAIN RangePartition: Partition -> Partition -> Map (Map at org.apache.mahout.flinkbindings.FlinkEngine$.colSums(FlinkEngine.scala:192)) -> Combine (Reduce at org.apache.mahout.flinkbindings.FlinkEngine$.colSums(FlinkEngine.scala:194))(9/16) switched to DEPLOYING
06/12/2016 23:10:38 CHAIN RangePartition: Partition -> Partition -> Map (Map at org.apache.mahout.flinkbindings.FlinkEngine$.colSums(FlinkEngine.scala:192)) -> Combine (Reduce at org.apache.mahout.flinkbindings.FlinkEngine$.colSums(FlinkEngine.scala:194))(6/16) switched to DEPLOYING
06/12/2016 23:10:38 CHAIN RangePartition: Partition -> Partition -> Map (Map at org.apache.mahout.flinkbindings.FlinkEngine$.colSums(FlinkEngine.scala:192)) -> Combine (Reduce at org.apache.mahout.flinkbindings.FlinkEngine$.colSums(FlinkEngine.scala:194))(3/16) switched to DEPLOYING
06/12/2016 23:10:38 CHAIN RangePartition: Partition -> Partition -> Map (Map at org.apache.mahout.flinkbindings.FlinkEngine$.colSums(FlinkEngine.scala:192)) -> Combine (Reduce at org.apache.mahout.flinkbindings.FlinkEngine$.colSums(FlinkEngine.scala:194))(10/16) switched to DEPLOYING
06/12/2016 23:10:38 CHAIN RangePartition: Partition -> Partition -> Map (Map at org.apache.mahout.flinkbindings.FlinkEngine$.colSums(FlinkEngine.scala:192)) -> Combine (Reduce at org.apache.mahout.flinkbindings.FlinkEngine$.colSums(FlinkEngine.scala:194))(13/16) switched to DEPLOYING
06/12/2016 23:10:38 CHAIN RangePartition: Partition -> Partition -> Map (Map at org.apache.mahout.flinkbindings.FlinkEngine$.colSums(FlinkEngine.scala:192)) -> Combine (Reduce at org.apache.mahout.flinkbindings.FlinkEngine$.colSums(FlinkEngine.scala:194))(15/16) switched to DEPLOYING
06/12/2016 23:10:38 CHAIN RangePartition: Partition -> Partition -> Map (Map at org.apache.mahout.flinkbindings.FlinkEngine$.colSums(FlinkEngine.scala:192)) -> Combine (Reduce at org.apache.mahout.flinkbindings.FlinkEngine$.colSums(FlinkEngine.scala:194))(16/16) switched to DEPLOYING
06/12/2016 23:10:38 CHAIN RangePartition: Partition -> Partition -> Map (Map at org.apache.mahout.flinkbindings.FlinkEngine$.colSums(FlinkEngine.scala:192)) -> Combine (Reduce at org.apache.mahout.flinkbindings.FlinkEngine$.colSums(FlinkEngine.scala:194))(4/16) switched to RUNNING
06/12/2016 23:10:38 CHAIN RangePartition: Partition -> Partition -> Map (Map at org.apache.mahout.flinkbindings.FlinkEngine$.colSums(FlinkEngine.scala:192)) -> Combine (Reduce at org.apache.mahout.flinkbindings.FlinkEngine$.colSums(FlinkEngine.scala:194))(1/16) switched to RUNNING
06/12/2016 23:10:38 Reduce (Reduce at org.apache.mahout.flinkbindings.FlinkEngine$.colSums(FlinkEngine.scala:194))(1/1) switched to SCHEDULED
06/12/2016 23:10:38 Reduce (Reduce at org.apache.mahout.flinkbindings.FlinkEngine$.colSums(FlinkEngine.scala:194))(1/1) switched to DEPLOYING
06/12/2016 23:10:38 CHAIN RangePartition: Partition -> Partition -> Map (Map at org.apache.mahout.flinkbindings.FlinkEngine$.colSums(FlinkEngine.scala:192)) -> Combine (Reduce at org.apache.mahout.flinkbindings.FlinkEngine$.colSums(FlinkEngine.scala:194))(5/16) switched to RUNNING
06/12/2016 23:10:38 CHAIN RangePartition: Partition -> Partition -> Map (Map at org.apache.mahout.flinkbindings.FlinkEngine$.colSums(FlinkEngine.scala:192)) -> Combine (Reduce at org.apache.mahout.flinkbindings.FlinkEngine$.colSums(FlinkEngine.scala:194))(14/16) switched to RUNNING
06/12/2016 23:10:38 CHAIN RangePartition: Partition -> Partition -> Map (Map at org.apache.mahout.flinkbindings.FlinkEngine$.colSums(FlinkEngine.scala:192)) -> Combine (Reduce at org.apache.mahout.flinkbindings.FlinkEngine$.colSums(FlinkEngine.scala:194))(2/16) switched to RUNNING
06/12/2016 23:10:38 CHAIN RangePartition: Partition -> Partition -> Map (Map at org.apache.mahout.flinkbindings.FlinkEngine$.colSums(FlinkEngine.scala:192)) -> Combine (Reduce at org.apache.mahout.flinkbindings.FlinkEngine$.colSums(FlinkEngine.scala:194))(11/16) switched to RUNNING
06/12/2016 23:10:38 CHAIN RangePartition: Partition -> Partition -> Map (Map at org.apache.mahout.flinkbindings.FlinkEngine$.colSums(FlinkEngine.scala:192)) -> Combine (Reduce at org.apache.mahout.flinkbindings.FlinkEngine$.colSums(FlinkEngine.scala:194))(12/16) switched to RUNNING
06/12/2016 23:10:38 CHAIN RangePartition: Partition -> Partition -> Map (Map at org.apache.mahout.flinkbindings.FlinkEngine$.colSums(FlinkEngine.scala:192)) -> Combine (Reduce at org.apache.mahout.flinkbindings.FlinkEngine$.colSums(FlinkEngine.scala:194))(9/16) switched to RUNNING
06/12/2016 23:10:38 CHAIN RangePartition: Partition -> Partition -> Map (Map at org.apache.mahout.flinkbindings.FlinkEngine$.colSums(FlinkEngine.scala:192)) -> Combine (Reduce at org.apache.mahout.flinkbindings.FlinkEngine$.colSums(FlinkEngine.scala:194))(8/16) switched to RUNNING
06/12/2016 23:10:38 CHAIN RangePartition: Partition -> Partition -> Map (Map at org.apache.mahout.flinkbindings.FlinkEngine$.colSums(FlinkEngine.scala:192)) -> Combine (Reduce at org.apache.mahout.flinkbindings.FlinkEngine$.colSums(FlinkEngine.scala:194))(6/16) switched to RUNNING
06/12/2016 23:10:38 CHAIN RangePartition: Partition -> Partition -> Map (Map at org.apache.mahout.flinkbindings.FlinkEngine$.colSums(FlinkEngine.scala:192)) -> Combine (Reduce at org.apache.mahout.flinkbindings.FlinkEngine$.colSums(FlinkEngine.scala:194))(3/16) switched to RUNNING
06/12/2016 23:10:38 CHAIN RangePartition: Partition -> Partition -> Map (Map at org.apache.mahout.flinkbindings.FlinkEngine$.colSums(FlinkEngine.scala:192)) -> Combine (Reduce at org.apache.mahout.flinkbindings.FlinkEngine$.colSums(FlinkEngine.scala:194))(7/16) switched to RUNNING
06/12/2016 23:10:38 CHAIN RangePartition: Partition -> Partition -> Map (Map at org.apache.mahout.flinkbindings.FlinkEngine$.colSums(FlinkEngine.scala:192)) -> Combine (Reduce at org.apache.mahout.flinkbindings.FlinkEngine$.colSums(FlinkEngine.scala:194))(1/16) switched to FINISHED
06/12/2016 23:10:38 CHAIN RangePartition: Partition -> Partition -> Map (Map at org.apache.mahout.flinkbindings.FlinkEngine$.colSums(FlinkEngine.scala:192)) -> Combine (Reduce at org.apache.mahout.flinkbindings.FlinkEngine$.colSums(FlinkEngine.scala:194))(6/16) switched to FINISHED
261715 [CHAIN RangePartition: Partition -> Partition -> Map (Map at org.apache.mahout.flinkbindings.FlinkEngine$.colSums(FlinkEngine.scala:192)) -> Combine (Reduce at org.apache.mahout.flinkbindings.FlinkEngine$.colSums(FlinkEngine.scala:194)) (12/16)] ERROR org.apache.flink.runtime.taskmanager.Task - FATAL - exception in task resource cleanup
java.lang.OutOfMemoryError: unable to create new native thread
    at java.lang.Thread.start0(Native Method)
    at java.lang.Thread.start(Thread.java:714)
    at scala.concurrent.forkjoin.ForkJoinPool.tryAddWorker(ForkJoinPool.java:1672)
    at scala.concurrent.forkjoin.ForkJoinPool.signalWork(ForkJoinPool.java:1966)
    at scala.concurrent.forkjoin.ForkJoinPool.fullExternalPush(ForkJoinPool.java:1905)
    at scala.concurrent.forkjoin.ForkJoinPool.externalPush(ForkJoinPool.java:1834)
    at scala.concurrent.forkjoin.ForkJoinPool.execute(ForkJoinPool.java:2955)
    at akka.dispatch.ForkJoinExecutorConfigurator$AkkaForkJoinPool.execute(AbstractDispatcher.scala:387)
    at akka.dispatch.ExecutorServiceDelegate$class.execute(ThreadPoolBuilder.scala:212)
    at akka.dispatch.Dispatcher$LazyExecutorServiceDelegate.execute(Dispatcher.scala:43)
    at akka.dispatch.Dispatcher.registerForExecution(Dispatcher.scala:118)
    at akka.dispatch.Dispatcher.dispatch(Dispatcher.scala:59)
    at akka.actor.dungeon.Dispatch$class.sendMessage(Dispatch.scala:123)
    at akka.actor.ActorCell.sendMessage(ActorCell.scala:369)
    at akka.actor.Cell$class.sendMessage(ActorCell.scala:290)
    at akka.actor.ActorCell.sendMessage(ActorCell.scala:369)
    at akka.actor.RepointableActorRef.$bang(RepointableActorRef.scala:166)
    at akka.actor.ActorRef.tell(ActorRef.scala:123)
    at org.apache.flink.runtime.instance.AkkaActorGateway.tell(AkkaActorGateway.java:79)
    at org.apache.flink.runtime.taskmanager.Task.notifyFinalState(Task.java:735)
    at org.apache.flink.runtime.taskmanager.Task.run(Task.java:670)
    at java.lang.Thread.run(Thread.java:745)
261715 [flink-akka.actor.default-dispatcher-19] ERROR org.apache.flink.runtime.executiongraph.ExecutionGraph - Error while notifying execution graph of execution state transition.
java.lang.OutOfMemoryError: unable to create new native thread
    at java.lang.Thread.start0(Native Method)
    at java.lang.Thread.start(Thread.java:714)
    at scala.concurrent.forkjoin.ForkJoinPool.tryAddWorker(ForkJoinPool.java:1672)
    at scala.concurrent.forkjoin.ForkJoinPool.signalWork(ForkJoinPool.java:1966)
    at scala.concurrent.forkjoin.ForkJoinPool$WorkQueue.push(ForkJoinPool.java:1072)
    at scala.concurrent.forkjoin.ForkJoinTask.fork(ForkJoinTask.java:654)
    at akka.dispatch.ForkJoinExecutorConfigurator$AkkaForkJoinPool.execute(AbstractDispatcher.scala:386)
    at akka.dispatch.ExecutorServiceDelegate$class.execute(ThreadPoolBuilder.scala:212)
    at akka.dispatch.Dispatcher$LazyExecutorServiceDelegate.execute(Dispatcher.scala:43)
    at akka.dispatch.Dispatcher.registerForExecution(Dispatcher.scala:118)
    at akka.dispatch.Dispatcher.dispatch(Dispatcher.scala:59)
    at akka.actor.dungeon.Dispatch$class.sendMessage(Dispatch.scala:123)
    at akka.actor.ActorCell.sendMessage(ActorCell.scala:369)
    at akka.actor.Cell$class.sendMessage(ActorCell.scala:290)
    at akka.actor.ActorCell.sendMessage(ActorCell.scala:369)
    at akka.actor.RepointableActorRef.$bang(RepointableActorRef.scala:166)
    at akka.actor.ActorRef.tell(ActorRef.scala:123)
    at org.apache.flink.runtime.instance.AkkaActorGateway.tell(AkkaActorGateway.java:79)
    at org.apache.flink.runtime.executiongraph.ExecutionGraph.notifyExecutionChange(ExecutionGraph.java:1217)
    at org.apache.flink.runtime.executiongraph.ExecutionVertex.notifyStateTransition(ExecutionVertex.java:627)
    at org.apache.flink.runtime.executiongraph.Execution.transitionState(Execution.java:984)
    at org.apache.flink.runtime.executiongraph.Execution.transitionState(Execution.java:966)
    at org.apache.flink.runtime.executiongraph.Execution.markFinished(Execution.java:658)
    at org.apache.flink.runtime.executiongraph.ExecutionGraph.updateState(ExecutionGraph.java:1091)
    at org.apache.flink.runtime.jobmanager.JobManager$$anonfun$handleMessage$1$$anonfun$applyOrElse$4.apply$mcV$sp(JobManager.scala:518)
    at org.apache.flink.runtime.jobmanager.JobManager$$anonfun$handleMessage$1$$anonfun$applyOrElse$4.apply(JobManager.scala:517)
    at org.apache.flink.runtime.jobmanager.JobManager$$anonfun$handleMessage$1$$anonfun$applyOrElse$4.apply(JobManager.scala:517)
    at scala.concurrent.impl.Future$PromiseCompletingRunnable.liftedTree1$1(Future.scala:24)
    at scala.concurrent.impl.Future$PromiseCompletingRunnable.run(Future.scala:24)
    at akka.dispatch.TaskInvocation.run(AbstractDispatcher.scala:41)
    at akka.dispatch.ForkJoinExecutorConfigurator$AkkaForkJoinTask.exec(AbstractDispatcher.scala:401)
    at scala.concurrent.forkjoin.ForkJoinTask.doExec(ForkJoinTask.java:260)
    at scala.concurrent.forkjoin.ForkJoinPool$WorkQueue.runTask(ForkJoinPool.java:1339)
    at scala.concurrent.forkjoin.ForkJoinPool.runWorker(ForkJoinPool.java:1979)
    at scala.concurrent.forkjoin.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:107)
261718 [flink-akka.actor.default-dispatcher-4] ERROR org.apache.flink.runtime.taskmanager.TaskManager -
==============================================================
======================      FATAL      =======================
==============================================================
A fatal error occurred, forcing the TaskManager to shut down: FATAL - exception in task resource cleanup
java.lang.OutOfMemoryError: unable to create new native thread
    at java.lang.Thread.start0(Native Method)
    at java.lang.Thread.start(Thread.java:714)
    at scala.concurrent.forkjoin.ForkJoinPool.tryAddWorker(ForkJoinPool.java:1672)
    at scala.concurrent.forkjoin.ForkJoinPool.signalWork(ForkJoinPool.java:1966)
    at scala.concurrent.forkjoin.ForkJoinPool.fullExternalPush(ForkJoinPool.java:1905)
    at scala.concurrent.forkjoin.ForkJoinPool.externalPush(ForkJoinPool.java:1834)
    at scala.concurrent.forkjoin.ForkJoinPool.execute(ForkJoinPool.java:2955)
    at akka.dispatch.ForkJoinExecutorConfigurator$AkkaForkJoinPool.execute(AbstractDispatcher.scala:387)
    at akka.dispatch.ExecutorServiceDelegate$class.execute(ThreadPoolBuilder.scala:212)
    at akka.dispatch.Dispatcher$LazyExecutorServiceDelegate.execute(Dispatcher.scala:43)
    at akka.dispatch.Dispatcher.registerForExecution(Dispatcher.scala:118)
    at akka.dispatch.Dispatcher.dispatch(Dispatcher.scala:59)
    at akka.actor.dungeon.Dispatch$class.sendMessage(Dispatch.scala:123)
    at akka.actor.ActorCell.sendMessage(ActorCell.scala:369)
    at akka.actor.Cell$class.sendMessage(ActorCell.scala:290)
    at akka.actor.ActorCell.sendMessage(ActorCell.scala:369)
    at akka.actor.RepointableActorRef.$bang(RepointableActorRef.scala:166)
    at akka.actor.ActorRef.tell(ActorRef.scala:123)
    at org.apache.flink.runtime.instance.AkkaActorGateway.tell(AkkaActorGateway.java:79)
    at org.apache.flink.runtime.taskmanager.Task.notifyFinalState(Task.java:735)
    at org.apache.flink.runtime.taskmanager.Task.run(Task.java:670)
    at java.lang.Thread.run(Thread.java:745)
Build timed out (after 223 minutes). Marking the build as failed.
Build was aborted
[PMD] Skipping publisher since build result is FAILURE
[TASKS] Skipping publisher since build result is FAILURE
Archiving artifacts
Compressed 171.05 MB of artifacts by 88.9% relative to #3363
Recording test results
Publishing Javadoc