jpd-defra commented on issue #2250:
URL: https://github.com/apache/sedona/issues/2250#issuecomment-3166856939

   Hi, I'm getting very similar-looking errors that started to appear over the past couple of days. I have tested the following Sedona versions on Databricks clusters with various runtimes:
   
   - Sedona 1.5.0 on DBR 13.3 LTS
   - Sedona 1.6.0 on DBR 13.3 LTS
   - Sedona 1.7.1 on DBR 15.4 LTS
   - Sedona 1.7.2 on DBR 15.4 LTS
   
   The error seems to occur specifically when writing GeoParquet files to the data lake with a command such as:
   
   `data.write.format("geoparquet").mode("overwrite").save(out_path)`
   
   Writing plain Parquet files with `pyspark`, or even writing GeoParquet files with `geopandas`, is still working for me.
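
   For reference, here is a rough sketch of the two write paths that still work for me (the DataFrame `data` and the output paths are placeholders, not my actual code):

   ```python
   # 1) Plain Parquet from PySpark (no GeoParquet metadata) writes fine:
   data.write.format("parquet").mode("overwrite").save(out_path)

   # 2) GeoParquet written via GeoPandas also works, after collecting to the driver.
   #    This assumes the Sedona geometry column converts to shapely objects via
   #    toPandas(), and that the data is small enough to fit in driver memory.
   import geopandas as gpd

   gdf = gpd.GeoDataFrame(data.toPandas(), geometry="geometry")
   gdf.to_parquet(gpq_out_path)  # gpq_out_path is a placeholder output path
   ```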
   
   My error stack trace is below:
   
   ```
   ---------------------------------------------------------------------------
   Py4JJavaError                             Traceback (most recent call last)
   File <command-7943125951080619>, line 21
        16 all_ap = union_all_dataframes(dfs).select(
        17     "id", "greenspace_id", "area_ha", "prow_perim_length", "geometry"
        18 )
        20 # write out clean version with one instance of each access point
   ---> 21 all_ap.write.format("geoparquet").mode("overwrite").save(alt_all_ap_combined_path)

   File /databricks/spark/python/pyspark/instrumentation_utils.py:48, in _wrap_function.<locals>.wrapper(*args, **kwargs)
        46 start = time.perf_counter()
        47 try:
   ---> 48     res = func(*args, **kwargs)
        49     logger.log_success(
        50         module_name, class_name, function_name, time.perf_counter() - start, signature
        51     )
        52     return res

   File /databricks/spark/python/pyspark/sql/readwriter.py:1463, in DataFrameWriter.save(self, path, format, mode, partitionBy, **options)
      1461     self._jwrite.save()
      1462 else:
   -> 1463     self._jwrite.save(path)
   
   File /databricks/spark/python/lib/py4j-0.10.9.7-src.zip/py4j/java_gateway.py:1355, in JavaMember.__call__(self, *args)
      1349 command = proto.CALL_COMMAND_NAME +\
      1350     self.command_header +\
      1351     args_command +\
      1352     proto.END_COMMAND_PART
      1354 answer = self.gateway_client.send_command(command)
   -> 1355 return_value = get_return_value(
      1356     answer, self.gateway_client, self.target_id, self.name)
      1358 for temp_arg in temp_args:
      1359     if hasattr(temp_arg, "_detach"):

   File /databricks/spark/python/pyspark/errors/exceptions/captured.py:188, in capture_sql_exception.<locals>.deco(*a, **kw)
       186 def deco(*a: Any, **kw: Any) -> Any:
       187     try:
   --> 188         return f(*a, **kw)
       189     except Py4JJavaError as e:
       190         converted = convert_exception(e.java_exception)

   File /databricks/spark/python/lib/py4j-0.10.9.7-src.zip/py4j/protocol.py:326, in get_return_value(answer, gateway_client, target_id, name)
       324 value = OUTPUT_CONVERTER[type](answer[2:], gateway_client)
       325 if answer[1] == REFERENCE_TYPE:
   --> 326     raise Py4JJavaError(
       327         "An error occurred while calling {0}{1}{2}.\n".
       328         format(target_id, ".", name), value)
       329 else:
       330     raise Py4JError(
       331         "An error occurred while calling {0}{1}{2}. Trace:\n{3}\n".
       332         format(target_id, ".", name, value))
   
   Py4JJavaError: An error occurred while calling o506.save.
   : org.apache.spark.SparkException: Job aborted due to stage failure: Task 0 in stage 17.0 failed 4 times, most recent failure: Lost task 0.3 in stage 17.0 (TID 39) (10.178.154.145 executor 0): java.lang.NoSuchMethodError: org.apache.spark.sql.execution.datasources.parquet.ParquetOutputWriter.<init>(Ljava/lang/String;Lorg/apache/hadoop/mapreduce/TaskAttemptContext;)V
        at org.apache.spark.sql.execution.datasources.parquet.GeoParquetFileFormat$$anon$1.newInstance(GeoParquetFileFormat.scala:146)
        at org.apache.spark.sql.execution.datasources.SingleDirectoryDataWriter.newOutputWriter(FileFormatDataWriter.scala:185)
        at org.apache.spark.sql.execution.datasources.SingleDirectoryDataWriter.<init>(FileFormatDataWriter.scala:167)
        at org.apache.spark.sql.execution.datasources.FileFormatWriter$.executeTask(FileFormatWriter.scala:508)
        at org.apache.spark.sql.execution.datasources.WriteFilesExec.$anonfun$doExecuteWrite$1(WriteFiles.scala:116)
        at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:934)
        at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:934)
        at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:60)
        at org.apache.spark.rdd.RDD.$anonfun$computeOrReadCheckpoint$1(RDD.scala:410)
        at com.databricks.spark.util.ExecutorFrameProfiler$.record(ExecutorFrameProfiler.scala:110)
        at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:407)
        at org.apache.spark.rdd.RDD.iterator(RDD.scala:374)
        at org.apache.spark.scheduler.ResultTask.$anonfun$runTask$3(ResultTask.scala:82)
        at com.databricks.spark.util.ExecutorFrameProfiler$.record(ExecutorFrameProfiler.scala:110)
        at org.apache.spark.scheduler.ResultTask.$anonfun$runTask$1(ResultTask.scala:82)
        at com.databricks.spark.util.ExecutorFrameProfiler$.record(ExecutorFrameProfiler.scala:110)
        at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:62)
        at org.apache.spark.TaskContext.runTaskWithListeners(TaskContext.scala:196)
        at org.apache.spark.scheduler.Task.doRunTask(Task.scala:181)
        at org.apache.spark.scheduler.Task.$anonfun$run$5(Task.scala:146)
        at com.databricks.unity.EmptyHandle$.runWithAndClose(UCSHandle.scala:129)
        at org.apache.spark.scheduler.Task.$anonfun$run$1(Task.scala:146)
        at com.databricks.spark.util.ExecutorFrameProfiler$.record(ExecutorFrameProfiler.scala:110)
        at org.apache.spark.scheduler.Task.run(Task.scala:99)
        at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$8(Executor.scala:900)
        at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1776)
        at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:903)
        at scala.runtime.java8.JFunction0$mcV$sp.apply(JFunction0$mcV$sp.java:23)
        at com.databricks.spark.util.ExecutorFrameProfiler$.record(ExecutorFrameProfiler.scala:110)
        at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:798)
        at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
        at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
        at java.lang.Thread.run(Thread.java:750)
   
   Driver stacktrace:
        at org.apache.spark.scheduler.DAGScheduler.failJobAndIndependentStages(DAGScheduler.scala:3694)
        at org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2(DAGScheduler.scala:3616)
        at org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2$adapted(DAGScheduler.scala:3603)
        at scala.collection.mutable.ResizableArray.foreach(ResizableArray.scala:62)
        at scala.collection.mutable.ResizableArray.foreach$(ResizableArray.scala:55)
        at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:49)
        at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:3603)
        at org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1(DAGScheduler.scala:1548)
        at org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1$adapted(DAGScheduler.scala:1548)
        at scala.Option.foreach(Option.scala:407)
        at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:1548)
        at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:3939)
        at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:3851)
        at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:3839)
        at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:51)
        at org.apache.spark.scheduler.DAGScheduler.$anonfun$runJob$1(DAGScheduler.scala:1272)
        at scala.runtime.java8.JFunction0$mcV$sp.apply(JFunction0$mcV$sp.java:23)
        at com.databricks.spark.util.FrameProfiler$.record(FrameProfiler.scala:94)
        at org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:1260)
        at org.apache.spark.SparkContext.runJobInternal(SparkContext.scala:2961)
        at org.apache.spark.SparkContext.runJob(SparkContext.scala:2944)
        at org.apache.spark.sql.execution.datasources.FileFormatWriter$.$anonfun$executeWrite$4(FileFormatWriter.scala:410)
        at org.apache.spark.sql.execution.datasources.FileFormatWriter$.writeAndCommit(FileFormatWriter.scala:374)
        at org.apache.spark.sql.execution.datasources.FileFormatWriter$.executeWrite(FileFormatWriter.scala:407)
        at org.apache.spark.sql.execution.datasources.FileFormatWriter$.$anonfun$write$1(FileFormatWriter.scala:292)
        at com.databricks.spark.util.FrameProfiler$.record(FrameProfiler.scala:94)
        at org.apache.spark.sql.execution.datasources.FileFormatWriter$.write(FileFormatWriter.scala:117)
        at org.apache.spark.sql.execution.datasources.InsertIntoHadoopFsRelationCommand.run(InsertIntoHadoopFsRelationCommand.scala:193)
        at org.apache.spark.sql.execution.command.DataWritingCommandExec.$anonfun$sideEffectResult$3(commands.scala:132)
        at com.databricks.spark.util.FrameProfiler$.record(FrameProfiler.scala:94)
        at org.apache.spark.sql.execution.command.DataWritingCommandExec.sideEffectResult$lzycompute(commands.scala:130)
        at org.apache.spark.sql.execution.command.DataWritingCommandExec.sideEffectResult(commands.scala:129)
        at org.apache.spark.sql.execution.command.DataWritingCommandExec.executeCollect(commands.scala:144)
        at org.apache.spark.sql.execution.QueryExecution$$anonfun$$nestedInanonfun$eagerlyExecuteCommands$1$1.$anonfun$applyOrElse$3(QueryExecution.scala:336)
        at org.apache.spark.sql.catalyst.QueryPlanningTracker$.withTracker(QueryPlanningTracker.scala:166)
        at org.apache.spark.sql.execution.QueryExecution$$anonfun$$nestedInanonfun$eagerlyExecuteCommands$1$1.$anonfun$applyOrElse$2(QueryExecution.scala:336)
        at org.apache.spark.sql.execution.SQLExecution$.$anonfun$withCustomExecutionEnv$9(SQLExecution.scala:303)
        at org.apache.spark.sql.execution.SQLExecution$.withSQLConfPropagated(SQLExecution.scala:536)
        at org.apache.spark.sql.execution.SQLExecution$.$anonfun$withCustomExecutionEnv$1(SQLExecution.scala:226)
        at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:1148)
        at org.apache.spark.sql.execution.SQLExecution$.withCustomExecutionEnv(SQLExecution.scala:157)
        at org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:485)
        at org.apache.spark.sql.execution.QueryExecution$$anonfun$$nestedInanonfun$eagerlyExecuteCommands$1$1.$anonfun$applyOrElse$1(QueryExecution.scala:335)
        at org.apache.spark.sql.execution.QueryExecution.org$apache$spark$sql$execution$QueryExecution$$withMVTagsIfNecessary(QueryExecution.scala:274)
        at org.apache.spark.sql.execution.QueryExecution$$anonfun$$nestedInanonfun$eagerlyExecuteCommands$1$1.applyOrElse(QueryExecution.scala:330)
        at org.apache.spark.sql.execution.QueryExecution$$anonfun$$nestedInanonfun$eagerlyExecuteCommands$1$1.applyOrElse(QueryExecution.scala:315)
        at org.apache.spark.sql.catalyst.trees.TreeNode.$anonfun$transformDownWithPruning$1(TreeNode.scala:465)
        at org.apache.spark.sql.catalyst.trees.CurrentOrigin$.withOrigin(origin.scala:69)
        at org.apache.spark.sql.catalyst.trees.TreeNode.transformDownWithPruning(TreeNode.scala:465)
        at org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.org$apache$spark$sql$catalyst$plans$logical$AnalysisHelper$$super$transformDownWithPruning(LogicalPlan.scala:39)
        at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.transformDownWithPruning(AnalysisHelper.scala:339)
        at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.transformDownWithPruning$(AnalysisHelper.scala:335)
        at org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.transformDownWithPruning(LogicalPlan.scala:39)
        at org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.transformDownWithPruning(LogicalPlan.scala:39)
        at org.apache.spark.sql.catalyst.trees.TreeNode.transformDown(TreeNode.scala:441)
        at org.apache.spark.sql.execution.QueryExecution.$anonfun$eagerlyExecuteCommands$1(QueryExecution.scala:315)
        at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper$.allowInvokingTransformsInAnalyzer(AnalysisHelper.scala:395)
        at org.apache.spark.sql.execution.QueryExecution.eagerlyExecuteCommands(QueryExecution.scala:315)
        at org.apache.spark.sql.execution.QueryExecution.commandExecuted$lzycompute(QueryExecution.scala:232)
        at org.apache.spark.sql.execution.QueryExecution.commandExecuted(QueryExecution.scala:229)
        at org.apache.spark.sql.execution.QueryExecution.assertCommandExecuted(QueryExecution.scala:406)
        at org.apache.spark.sql.DataFrameWriter.runCommand(DataFrameWriter.scala:956)
        at org.apache.spark.sql.DataFrameWriter.saveToV1Source(DataFrameWriter.scala:424)
        at org.apache.spark.sql.DataFrameWriter.saveInternal(DataFrameWriter.scala:391)
        at org.apache.spark.sql.DataFrameWriter.save(DataFrameWriter.scala:250)
        at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
        at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
        at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
        at java.lang.reflect.Method.invoke(Method.java:498)
        at py4j.reflection.MethodInvoker.invoke(MethodInvoker.java:244)
        at py4j.reflection.ReflectionEngine.invoke(ReflectionEngine.java:397)
        at py4j.Gateway.invoke(Gateway.java:306)
        at py4j.commands.AbstractCommand.invokeMethod(AbstractCommand.java:132)
        at py4j.commands.CallCommand.execute(CallCommand.java:79)
        at py4j.ClientServerConnection.waitForCommands(ClientServerConnection.java:199)
        at py4j.ClientServerConnection.run(ClientServerConnection.java:119)
        at java.lang.Thread.run(Thread.java:750)
   Caused by: java.lang.NoSuchMethodError: org.apache.spark.sql.execution.datasources.parquet.ParquetOutputWriter.<init>(Ljava/lang/String;Lorg/apache/hadoop/mapreduce/TaskAttemptContext;)V
        at org.apache.spark.sql.execution.datasources.parquet.GeoParquetFileFormat$$anon$1.newInstance(GeoParquetFileFormat.scala:146)
        at org.apache.spark.sql.execution.datasources.SingleDirectoryDataWriter.newOutputWriter(FileFormatDataWriter.scala:185)
        at org.apache.spark.sql.execution.datasources.SingleDirectoryDataWriter.<init>(FileFormatDataWriter.scala:167)
        at org.apache.spark.sql.execution.datasources.FileFormatWriter$.executeTask(FileFormatWriter.scala:508)
        at org.apache.spark.sql.execution.datasources.WriteFilesExec.$anonfun$doExecuteWrite$1(WriteFiles.scala:116)
        at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:934)
        at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:934)
        at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:60)
        at org.apache.spark.rdd.RDD.$anonfun$computeOrReadCheckpoint$1(RDD.scala:410)
        at com.databricks.spark.util.ExecutorFrameProfiler$.record(ExecutorFrameProfiler.scala:110)
        at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:407)
        at org.apache.spark.rdd.RDD.iterator(RDD.scala:374)
        at org.apache.spark.scheduler.ResultTask.$anonfun$runTask$3(ResultTask.scala:82)
        at com.databricks.spark.util.ExecutorFrameProfiler$.record(ExecutorFrameProfiler.scala:110)
        at org.apache.spark.scheduler.ResultTask.$anonfun$runTask$1(ResultTask.scala:82)
        at com.databricks.spark.util.ExecutorFrameProfiler$.record(ExecutorFrameProfiler.scala:110)
        at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:62)
        at org.apache.spark.TaskContext.runTaskWithListeners(TaskContext.scala:196)
        at org.apache.spark.scheduler.Task.doRunTask(Task.scala:181)
        at org.apache.spark.scheduler.Task.$anonfun$run$5(Task.scala:146)
        at com.databricks.unity.EmptyHandle$.runWithAndClose(UCSHandle.scala:129)
        at org.apache.spark.scheduler.Task.$anonfun$run$1(Task.scala:146)
        at com.databricks.spark.util.ExecutorFrameProfiler$.record(ExecutorFrameProfiler.scala:110)
        at org.apache.spark.scheduler.Task.run(Task.scala:99)
        at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$8(Executor.scala:900)
        at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1776)
        at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:903)
        at scala.runtime.java8.JFunction0$mcV$sp.apply(JFunction0$mcV$sp.java:23)
        at com.databricks.spark.util.ExecutorFrameProfiler$.record(ExecutorFrameProfiler.scala:110)
        at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:798)
        at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
        at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
        ... 1 more
   ```
   
   

