See
<https://ci-beam.apache.org/job/beam_PostCommit_Python_VR_Spark/5788/display/redirect>
Changes:
------------------------------------------
[...truncated 199.49 KB...]
    at org.apache.beam.vendor.grpc.v1p43p2.io.grpc.internal.ServerCallImpl.closeInternal(ServerCallImpl.java:214)
    at org.apache.beam.vendor.grpc.v1p43p2.io.grpc.internal.ServerCallImpl.close(ServerCallImpl.java:207)
    at org.apache.beam.vendor.grpc.v1p43p2.io.grpc.stub.ServerCalls$ServerCallStreamObserverImpl.onCompleted(ServerCalls.java:395)
    at org.apache.beam.runners.fnexecution.state.GrpcStateService.close(GrpcStateService.java:63)
    at org.apache.beam.sdk.fn.server.GrpcFnServer.close(GrpcFnServer.java:156)
    at org.apache.beam.runners.fnexecution.control.DefaultJobBundleFactory$WrappedSdkHarnessClient.$closeResource(DefaultJobBundleFactory.java:642)
    at org.apache.beam.runners.fnexecution.control.DefaultJobBundleFactory$WrappedSdkHarnessClient.close(DefaultJobBundleFactory.java:642)
    at org.apache.beam.runners.fnexecution.control.DefaultJobBundleFactory$WrappedSdkHarnessClient.unref(DefaultJobBundleFactory.java:658)
    at org.apache.beam.runners.fnexecution.control.DefaultJobBundleFactory$WrappedSdkHarnessClient.access$400(DefaultJobBundleFactory.java:589)
    at org.apache.beam.runners.fnexecution.control.DefaultJobBundleFactory.close(DefaultJobBundleFactory.java:334)
    at org.apache.beam.runners.fnexecution.control.DefaultExecutableStageContext.close(DefaultExecutableStageContext.java:43)
    at org.apache.beam.runners.fnexecution.control.ReferenceCountingExecutableStageContextFactory$WrappedContext.closeActual(ReferenceCountingExecutableStageContextFactory.java:212)
    at org.apache.beam.runners.fnexecution.control.ReferenceCountingExecutableStageContextFactory$WrappedContext.access$200(ReferenceCountingExecutableStageContextFactory.java:188)
    at org.apache.beam.runners.fnexecution.control.ReferenceCountingExecutableStageContextFactory.release(ReferenceCountingExecutableStageContextFactory.java:177)
    at org.apache.beam.runners.fnexecution.control.ReferenceCountingExecutableStageContextFactory.scheduleRelease(ReferenceCountingExecutableStageContextFactory.java:136)
    at org.apache.beam.runners.fnexecution.control.ReferenceCountingExecutableStageContextFactory.access$300(ReferenceCountingExecutableStageContextFactory.java:48)
    at org.apache.beam.runners.fnexecution.control.ReferenceCountingExecutableStageContextFactory$WrappedContext.close(ReferenceCountingExecutableStageContextFactory.java:208)
    at org.apache.beam.runners.spark.translation.SparkExecutableStageFunction.$closeResource(SparkExecutableStageFunction.java:210)
    at org.apache.beam.runners.spark.translation.SparkExecutableStageFunction.call(SparkExecutableStageFunction.java:210)
    at org.apache.beam.runners.spark.translation.SparkExecutableStageFunction.lambda$forPair$55b1e86f$1(SparkExecutableStageFunction.java:125)
    at org.apache.spark.api.java.JavaRDDLike$$anonfun$fn$1$1.apply(JavaRDDLike.scala:125)
    at org.apache.spark.api.java.JavaRDDLike$$anonfun$fn$1$1.apply(JavaRDDLike.scala:125)
    at scala.collection.Iterator$$anon$12.nextCur(Iterator.scala:435)
    at scala.collection.Iterator$$anon$12.hasNext(Iterator.scala:441)
    at scala.collection.Iterator$$anon$12.hasNext(Iterator.scala:440)
    at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:409)
    at org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriter.write(BypassMergeSortShuffleWriter.java:148)
    at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:99)
    at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:55)
    at org.apache.spark.scheduler.Task.run(Task.scala:123)
    at org.apache.spark.executor.Executor$TaskRunner$$anonfun$10.apply(Executor.scala:411)
    at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1360)
    at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:417)
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
    at java.lang.Thread.run(Thread.java:748)
22/03/14 12:14:47 ERROR org.apache.spark.executor.Executor: Exception in task 1.0 in stage 127.0 (TID 189)
java.lang.RuntimeException: No client connected within timeout
    at org.apache.beam.runners.fnexecution.data.GrpcDataService.send(GrpcDataService.java:192)
    at org.apache.beam.runners.fnexecution.control.SdkHarnessClient$BundleProcessor.newBundle(SdkHarnessClient.java:287)
    at org.apache.beam.runners.fnexecution.control.SdkHarnessClient$BundleProcessor.newBundle(SdkHarnessClient.java:197)
    at org.apache.beam.runners.fnexecution.control.DefaultJobBundleFactory$SimpleStageBundleFactory.getBundle(DefaultJobBundleFactory.java:519)
    at org.apache.beam.runners.fnexecution.control.StageBundleFactory.getBundle(StageBundleFactory.java:87)
    at org.apache.beam.runners.fnexecution.control.StageBundleFactory.getBundle(StageBundleFactory.java:76)
    at org.apache.beam.runners.spark.translation.SparkExecutableStageFunction.processElements(SparkExecutableStageFunction.java:223)
    at org.apache.beam.runners.spark.translation.SparkExecutableStageFunction.call(SparkExecutableStageFunction.java:150)
    at org.apache.beam.runners.spark.translation.SparkExecutableStageFunction.lambda$forPair$55b1e86f$1(SparkExecutableStageFunction.java:125)
    at org.apache.spark.api.java.JavaRDDLike$$anonfun$fn$1$1.apply(JavaRDDLike.scala:125)
    at org.apache.spark.api.java.JavaRDDLike$$anonfun$fn$1$1.apply(JavaRDDLike.scala:125)
    at scala.collection.Iterator$$anon$12.nextCur(Iterator.scala:435)
    at scala.collection.Iterator$$anon$12.hasNext(Iterator.scala:441)
    at scala.collection.Iterator$$anon$12.hasNext(Iterator.scala:440)
    at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:409)
    at org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriter.write(BypassMergeSortShuffleWriter.java:148)
    at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:99)
    at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:55)
    at org.apache.spark.scheduler.Task.run(Task.scala:123)
    at org.apache.spark.executor.Executor$TaskRunner$$anonfun$10.apply(Executor.scala:411)
    at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1360)
    at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:417)
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
    at java.lang.Thread.run(Thread.java:748)
Caused by: java.util.concurrent.TimeoutException: Waited 3 minutes for org.apache.beam.vendor.guava.v26_0_jre.com.google.common.util.concurrent.SettableFuture@512acce2[status=PENDING]
    at org.apache.beam.vendor.guava.v26_0_jre.com.google.common.util.concurrent.AbstractFuture.get(AbstractFuture.java:471)
    at org.apache.beam.vendor.guava.v26_0_jre.com.google.common.util.concurrent.AbstractFuture$TrustedFuture.get(AbstractFuture.java:90)
    at org.apache.beam.runners.fnexecution.data.GrpcDataService.send(GrpcDataService.java:186)
    ... 24 more
22/03/14 12:14:47 WARN org.apache.spark.scheduler.TaskSetManager: Lost task 1.0 in stage 127.0 (TID 189, localhost, executor driver): java.lang.RuntimeException: No client connected within timeout
    at org.apache.beam.runners.fnexecution.data.GrpcDataService.send(GrpcDataService.java:192)
    at org.apache.beam.runners.fnexecution.control.SdkHarnessClient$BundleProcessor.newBundle(SdkHarnessClient.java:287)
    at org.apache.beam.runners.fnexecution.control.SdkHarnessClient$BundleProcessor.newBundle(SdkHarnessClient.java:197)
    at org.apache.beam.runners.fnexecution.control.DefaultJobBundleFactory$SimpleStageBundleFactory.getBundle(DefaultJobBundleFactory.java:519)
    at org.apache.beam.runners.fnexecution.control.StageBundleFactory.getBundle(StageBundleFactory.java:87)
    at org.apache.beam.runners.fnexecution.control.StageBundleFactory.getBundle(StageBundleFactory.java:76)
    at org.apache.beam.runners.spark.translation.SparkExecutableStageFunction.processElements(SparkExecutableStageFunction.java:223)
    at org.apache.beam.runners.spark.translation.SparkExecutableStageFunction.call(SparkExecutableStageFunction.java:150)
    at org.apache.beam.runners.spark.translation.SparkExecutableStageFunction.lambda$forPair$55b1e86f$1(SparkExecutableStageFunction.java:125)
    at org.apache.spark.api.java.JavaRDDLike$$anonfun$fn$1$1.apply(JavaRDDLike.scala:125)
    at org.apache.spark.api.java.JavaRDDLike$$anonfun$fn$1$1.apply(JavaRDDLike.scala:125)
    at scala.collection.Iterator$$anon$12.nextCur(Iterator.scala:435)
    at scala.collection.Iterator$$anon$12.hasNext(Iterator.scala:441)
    at scala.collection.Iterator$$anon$12.hasNext(Iterator.scala:440)
    at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:409)
    at org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriter.write(BypassMergeSortShuffleWriter.java:148)
    at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:99)
    at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:55)
    at org.apache.spark.scheduler.Task.run(Task.scala:123)
    at org.apache.spark.executor.Executor$TaskRunner$$anonfun$10.apply(Executor.scala:411)
    at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1360)
    at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:417)
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
    at java.lang.Thread.run(Thread.java:748)
Caused by: java.util.concurrent.TimeoutException: Waited 3 minutes for org.apache.beam.vendor.guava.v26_0_jre.com.google.common.util.concurrent.SettableFuture@512acce2[status=PENDING]
    at org.apache.beam.vendor.guava.v26_0_jre.com.google.common.util.concurrent.AbstractFuture.get(AbstractFuture.java:471)
    at org.apache.beam.vendor.guava.v26_0_jre.com.google.common.util.concurrent.AbstractFuture$TrustedFuture.get(AbstractFuture.java:90)
    at org.apache.beam.runners.fnexecution.data.GrpcDataService.send(GrpcDataService.java:186)
    ... 24 more
22/03/14 12:14:47 ERROR org.apache.spark.scheduler.TaskSetManager: Task 1 in stage 127.0 failed 1 times; aborting job
22/03/14 12:14:47 ERROR org.apache.beam.runners.jobsubmission.JobInvocation: Error during job invocation test_pardo_state_with_custom_key_coder_1647259905.2321231_f5cb2978-2386-4e35-aa65-3eed25455679.
org.apache.beam.sdk.Pipeline$PipelineExecutionException: java.lang.RuntimeException: No client connected within timeout
    at org.apache.beam.runners.spark.SparkPipelineResult.beamExceptionFrom(SparkPipelineResult.java:73)
    at org.apache.beam.runners.spark.SparkPipelineResult.waitUntilFinish(SparkPipelineResult.java:104)
    at org.apache.beam.runners.spark.SparkPipelineResult.waitUntilFinish(SparkPipelineResult.java:92)
    at org.apache.beam.runners.spark.SparkPipelineRunner.run(SparkPipelineRunner.java:190)
    at org.apache.beam.runners.jobsubmission.JobInvocation.runPipeline(JobInvocation.java:86)
    at org.apache.beam.vendor.guava.v26_0_jre.com.google.common.util.concurrent.TrustedListenableFutureTask$TrustedFutureInterruptibleTask.runInterruptibly(TrustedListenableFutureTask.java:125)
    at org.apache.beam.vendor.guava.v26_0_jre.com.google.common.util.concurrent.InterruptibleTask.run(InterruptibleTask.java:57)
    at org.apache.beam.vendor.guava.v26_0_jre.com.google.common.util.concurrent.TrustedListenableFutureTask.run(TrustedListenableFutureTask.java:78)
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
    at java.lang.Thread.run(Thread.java:748)
Caused by: java.lang.RuntimeException: No client connected within timeout
    at org.apache.beam.runners.fnexecution.data.GrpcDataService.send(GrpcDataService.java:192)
    at org.apache.beam.runners.fnexecution.control.SdkHarnessClient$BundleProcessor.newBundle(SdkHarnessClient.java:287)
    at org.apache.beam.runners.fnexecution.control.SdkHarnessClient$BundleProcessor.newBundle(SdkHarnessClient.java:197)
    at org.apache.beam.runners.fnexecution.control.DefaultJobBundleFactory$SimpleStageBundleFactory.getBundle(DefaultJobBundleFactory.java:519)
    at org.apache.beam.runners.fnexecution.control.StageBundleFactory.getBundle(StageBundleFactory.java:87)
    at org.apache.beam.runners.fnexecution.control.StageBundleFactory.getBundle(StageBundleFactory.java:76)
    at org.apache.beam.runners.spark.translation.SparkExecutableStageFunction.processElements(SparkExecutableStageFunction.java:223)
    at org.apache.beam.runners.spark.translation.SparkExecutableStageFunction.call(SparkExecutableStageFunction.java:150)
    at org.apache.beam.runners.spark.translation.SparkExecutableStageFunction.lambda$forPair$55b1e86f$1(SparkExecutableStageFunction.java:125)
    at org.apache.spark.api.java.JavaRDDLike$$anonfun$fn$1$1.apply(JavaRDDLike.scala:125)
    at org.apache.spark.api.java.JavaRDDLike$$anonfun$fn$1$1.apply(JavaRDDLike.scala:125)
    at scala.collection.Iterator$$anon$12.nextCur(Iterator.scala:435)
    at scala.collection.Iterator$$anon$12.hasNext(Iterator.scala:441)
    at scala.collection.Iterator$$anon$12.hasNext(Iterator.scala:440)
    at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:409)
    at org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriter.write(BypassMergeSortShuffleWriter.java:148)
    at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:99)
    at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:55)
    at org.apache.spark.scheduler.Task.run(Task.scala:123)
    at org.apache.spark.executor.Executor$TaskRunner$$anonfun$10.apply(Executor.scala:411)
    at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1360)
    at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:417)
    ... 3 more
Caused by: java.util.concurrent.TimeoutException: Waited 3 minutes for org.apache.beam.vendor.guava.v26_0_jre.com.google.common.util.concurrent.SettableFuture@512acce2[status=PENDING]
    at org.apache.beam.vendor.guava.v26_0_jre.com.google.common.util.concurrent.AbstractFuture.get(AbstractFuture.java:471)
    at org.apache.beam.vendor.guava.v26_0_jre.com.google.common.util.concurrent.AbstractFuture$TrustedFuture.get(AbstractFuture.java:90)
    at org.apache.beam.runners.fnexecution.data.GrpcDataService.send(GrpcDataService.java:186)
    ... 24 more
ERROR:root:java.util.concurrent.TimeoutException: Waited 3 minutes for org.apache.beam.vendor.guava.v26_0_jre.com.google.common.util.concurrent.SettableFuture@512acce2[status=PENDING]
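
For anyone triaging this: the root failure above is the runner side giving up after waiting three minutes for an SDK harness to connect to its data channel. GrpcDataService.send blocks on a vendored-Guava SettableFuture that is completed only once a harness opens the data stream; when nothing connects, the TimeoutException fires and is rethrown as "No client connected within timeout". A minimal Python sketch of that wait-with-timeout pattern, with hypothetical names standing in for the Java internals:

    import concurrent.futures

    # Sketch only: the real code is the vendored Guava SettableFuture wait in
    # GrpcDataService.java; these names are hypothetical stand-ins.
    connected_client: concurrent.futures.Future = concurrent.futures.Future()

    def on_client_connected(client) -> None:
        # Invoked when an SDK harness opens the data stream to the runner.
        connected_client.set_result(client)

    def send(timeout_s: float = 180.0):
        try:
            # Block until a harness connects; the "Waited 3 minutes for
            # ...SettableFuture...[status=PENDING]" above is this timeout firing.
            return connected_client.result(timeout=timeout_s)
        except concurrent.futures.TimeoutError as exc:
            raise RuntimeError('No client connected within timeout') from exc

Since this suite runs with --environment_type=LOOPBACK, the "harness" is the test process itself, so a plausible reading is that the test process never dialed back within the three-minute window rather than a container failing to start.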
------------------------------ Captured log call -------------------------------
WARNING root:environments.py:371 Make sure that locally built Python SDK docker image has Python 3.9 interpreter.
ERROR apache_beam.runners.worker.data_plane:data_plane.py:641 Failed to read inputs in the data plane.
Traceback (most recent call last):
  File "<https://ci-beam.apache.org/job/beam_PostCommit_Python_VR_Spark/ws/src/sdks/python/test-suites/portable/py39/build/srcs/sdks/python/apache_beam/runners/worker/data_plane.py>", line 634, in _read_inputs
    for elements in elements_iterator:
  File "<https://ci-beam.apache.org/job/beam_PostCommit_Python_VR_Spark/ws/src/sdks/python/test-suites/portable/py39/build/srcs/sdks/python/target/.tox-spark-runner-test/spark-runner-test/lib/python3.9/site-packages/grpc/_channel.py>", line 426, in __next__
    return self._next()
  File "<https://ci-beam.apache.org/job/beam_PostCommit_Python_VR_Spark/ws/src/sdks/python/test-suites/portable/py39/build/srcs/sdks/python/target/.tox-spark-runner-test/spark-runner-test/lib/python3.9/site-packages/grpc/_channel.py>", line 826, in _next
    raise self
grpc._channel._MultiThreadedRendezvous: <_MultiThreadedRendezvous of RPC that terminated with:
    status = StatusCode.CANCELLED
    details = "Multiplexer hanging up"
    debug_error_string = "{"created":"@1647259905.793393624","description":"Error received from peer ipv6:[::1]:34263","file":"src/core/lib/surface/call.cc","file_line":903,"grpc_message":"Multiplexer hanging up","grpc_status":1}"
>
ERROR apache_beam.runners.worker.data_plane:data_plane.py:641 Failed to read inputs in the data plane.
Traceback (most recent call last):
  File "<https://ci-beam.apache.org/job/beam_PostCommit_Python_VR_Spark/ws/src/sdks/python/test-suites/portable/py39/build/srcs/sdks/python/apache_beam/runners/worker/data_plane.py>", line 634, in _read_inputs
    for elements in elements_iterator:
  File "<https://ci-beam.apache.org/job/beam_PostCommit_Python_VR_Spark/ws/src/sdks/python/test-suites/portable/py39/build/srcs/sdks/python/target/.tox-spark-runner-test/spark-runner-test/lib/python3.9/site-packages/grpc/_channel.py>", line 426, in __next__
    return self._next()
  File "<https://ci-beam.apache.org/job/beam_PostCommit_Python_VR_Spark/ws/src/sdks/python/test-suites/portable/py39/build/srcs/sdks/python/target/.tox-spark-runner-test/spark-runner-test/lib/python3.9/site-packages/grpc/_channel.py>", line 826, in _next
    raise self
grpc._channel._MultiThreadedRendezvous: <_MultiThreadedRendezvous of RPC that terminated with:
    status = StatusCode.UNAVAILABLE
    details = "Broken pipe"
    debug_error_string = "{"created":"@1647259907.402077117","description":"Error received from peer ipv6:[::1]:45557","file":"src/core/lib/surface/call.cc","file_line":903,"grpc_message":"Broken pipe","grpc_status":14}"
>
ERROR apache_beam.runners.worker.sdk_worker:sdk_worker.py:271 Error processing instruction 1. Original traceback is
Traceback (most recent call last):
  File "<https://ci-beam.apache.org/job/beam_PostCommit_Python_VR_Spark/ws/src/sdks/python/test-suites/portable/py39/build/srcs/sdks/python/apache_beam/runners/worker/data_plane.py>", line 487, in input_elements
    element = received.get(timeout=1)
  File "/usr/lib/python3.9/queue.py", line 179, in get
    raise Empty
_queue.Empty

During handling of the above exception, another exception occurred:

Traceback (most recent call last):
  File "<https://ci-beam.apache.org/job/beam_PostCommit_Python_VR_Spark/ws/src/sdks/python/test-suites/portable/py39/build/srcs/sdks/python/apache_beam/runners/worker/sdk_worker.py>", line 267, in _execute
    response = task()
  File "<https://ci-beam.apache.org/job/beam_PostCommit_Python_VR_Spark/ws/src/sdks/python/test-suites/portable/py39/build/srcs/sdks/python/apache_beam/runners/worker/sdk_worker.py>", line 340, in <lambda>
    lambda: self.create_worker().do_instruction(request), request)
  File "<https://ci-beam.apache.org/job/beam_PostCommit_Python_VR_Spark/ws/src/sdks/python/test-suites/portable/py39/build/srcs/sdks/python/apache_beam/runners/worker/sdk_worker.py>", line 580, in do_instruction
    return getattr(self, request_type)(
  File "<https://ci-beam.apache.org/job/beam_PostCommit_Python_VR_Spark/ws/src/sdks/python/test-suites/portable/py39/build/srcs/sdks/python/apache_beam/runners/worker/sdk_worker.py>", line 618, in process_bundle
    bundle_processor.process_bundle(instruction_id))
  File "<https://ci-beam.apache.org/job/beam_PostCommit_Python_VR_Spark/ws/src/sdks/python/test-suites/portable/py39/build/srcs/sdks/python/apache_beam/runners/worker/bundle_processor.py>", line 984, in process_bundle
    for element in data_channel.input_elements(instruction_id,
  File "<https://ci-beam.apache.org/job/beam_PostCommit_Python_VR_Spark/ws/src/sdks/python/test-suites/portable/py39/build/srcs/sdks/python/apache_beam/runners/worker/data_plane.py>", line 490, in input_elements
    raise RuntimeError('Channel closed prematurely.')
RuntimeError: Channel closed prematurely.
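
The Python-side "Channel closed prematurely." above is the mirror image of the same hang-up: input_elements polls a per-bundle queue with a one-second timeout, and once the gRPC read loop has died (the CANCELLED and "Broken pipe" errors earlier in this capture), the resulting _queue.Empty is converted into a RuntimeError. A simplified sketch of that polling loop, assuming a channel_is_closed() predicate in place of the real bookkeeping in data_plane.py:

    import queue
    from typing import Callable, Iterator

    def input_elements(received: queue.Queue,
                       channel_is_closed: Callable[[], bool]) -> Iterator[bytes]:
        # Simplified sketch of data_plane.input_elements (the frames at
        # data_plane.py:487 and :490 above); the real method tracks
        # instruction IDs and an abort flag rather than taking a predicate.
        while True:
            try:
                element = received.get(timeout=1)  # data_plane.py:487
            except queue.Empty:
                if channel_is_closed():
                    # The read loop died, so no more data can ever arrive.
                    raise RuntimeError('Channel closed prematurely.')
                continue  # channel still alive: keep polling
            if element is None:  # hypothetical end-of-bundle sentinel
                return
            yield element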
ERROR root:portable_runner.py:567 java.util.concurrent.TimeoutException: Waited 3 minutes for org.apache.beam.vendor.guava.v26_0_jre.com.google.common.util.concurrent.SettableFuture@512acce2[status=PENDING]
=============================== warnings summary ===============================
target/.tox-spark-runner-test/spark-runner-test/lib/python3.9/site-packages/tenacity/_asyncio.py:42
  <https://ci-beam.apache.org/job/beam_PostCommit_Python_VR_Spark/ws/src/sdks/python/test-suites/portable/py39/build/srcs/sdks/python/target/.tox-spark-runner-test/spark-runner-test/lib/python3.9/site-packages/tenacity/_asyncio.py>:42: DeprecationWarning: "@coroutine" decorator is deprecated since Python 3.8, use "async def" instead
    def call(self, fn, *args, **kwargs):
-- Docs: https://docs.pytest.org/en/latest/warnings.html
- generated xml file: <https://ci-beam.apache.org/job/beam_PostCommit_Python_VR_Spark/ws/src/sdks/python/test-suites/portable/py39/build/srcs/sdks/python/pytest_spark-runner-test.xml> -
======== 1 failed, 34 passed, 17 skipped, 1 warnings in 229.98 seconds =========
ERROR: InvocationError for command <https://ci-beam.apache.org/job/beam_PostCommit_Python_VR_Spark/ws/src/sdks/python/test-suites/portable/py39/build/srcs/sdks/python/scripts/pytest_validates_runner.sh> spark-runner-test apache_beam/runners/portability/spark_runner_test.py '--spark_job_server_jar=<https://ci-beam.apache.org/job/beam_PostCommit_Python_VR_Spark/ws/src/runners/spark/2/job-server/build/libs/beam-runners-spark-job-server-2.38.0-SNAPSHOT.jar> --environment_type=LOOPBACK' (exited with code 1)
spark-runner-test run-test-post: commands[0] | bash <https://ci-beam.apache.org/job/beam_PostCommit_Python_VR_Spark/ws/src/sdks/python/test-suites/portable/py39/build/srcs/sdks/python/scripts/run_tox_cleanup.sh>
___________________________________ summary ____________________________________
ERROR: spark-runner-test: commands failed
> Task :sdks:python:test-suites:portable:py39:sparkCompatibilityMatrixLOOPBACK FAILED
FAILURE: Build failed with an exception.
* What went wrong:
Execution failed for task ':sdks:python:test-suites:portable:py39:sparkCompatibilityMatrixLOOPBACK'.
> Process 'command 'sh'' finished with non-zero exit value 1
* Try:
> Run with --stacktrace option to get the stack trace.
> Run with --info or --debug option to get more log output.
> Run with --scan to get full insights.
* Get more help at https://help.gradle.org
BUILD FAILED in 12m 39s
81 actionable tasks: 52 executed, 27 from cache, 2 up-to-date
Publishing build scan...
Publishing failed.
The build scan server appears to be unavailable.
Please check https://status.gradle.com for the latest service status.
If the service is reported as available, please report this problem via
https://gradle.com/help/plugin and include the following via copy/paste:
----------
Gradle version: 7.3.2
Plugin version: 3.4.1
Request URL: https://status.gradle.com
Request ID: 459b4c5a-97bf-4cba-aaf0-5f02157cfe60
Response status code: 405
Response server type: Varnish
----------
Build step 'Invoke Gradle script' changed build result to FAILURE
Build step 'Invoke Gradle script' marked build as failure
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]