This is an automated email from the ASF dual-hosted git repository.

jiayu pushed a commit to branch geotiff-enhance
in repository https://gitbox.apache.org/repos/asf/sedona.git

commit 2238b44a7542a54d6158c5f93c4dd8b88f15d512
Author: Jia Yu <[email protected]>
AuthorDate: Thu May 11 01:38:13 2023 -0700

    Throw exception if used for reading
---
 .../sql/sedona_sql/io/raster/RasterFileFormat.scala | 21 ++++++---------------
 1 file changed, 6 insertions(+), 15 deletions(-)

diff --git 
a/sql/common/src/main/scala/org/apache/spark/sql/sedona_sql/io/raster/RasterFileFormat.scala
 
b/sql/common/src/main/scala/org/apache/spark/sql/sedona_sql/io/raster/RasterFileFormat.scala
index 54bf05d4..85942fd1 100644
--- 
a/sql/common/src/main/scala/org/apache/spark/sql/sedona_sql/io/raster/RasterFileFormat.scala
+++ 
b/sql/common/src/main/scala/org/apache/spark/sql/sedona_sql/io/raster/RasterFileFormat.scala
@@ -20,14 +20,13 @@
 
 package org.apache.spark.sql.sedona_sql.io.raster
 
-import org.apache.hadoop.conf.Configuration
 import org.apache.hadoop.fs.{FSDataOutputStream, FileStatus, Path}
 import org.apache.hadoop.mapreduce.{Job, TaskAttemptContext}
 import org.apache.sedona.common.raster.Serde
 import org.apache.spark.sql.SparkSession
 import org.apache.spark.sql.catalyst.InternalRow
-import org.apache.spark.sql.execution.datasources.{FileFormat, OutputWriter, 
OutputWriterFactory, PartitionedFile}
-import org.apache.spark.sql.sources.{DataSourceRegister, Filter}
+import org.apache.spark.sql.execution.datasources.{FileFormat, OutputWriter, 
OutputWriterFactory}
+import org.apache.spark.sql.sources.DataSourceRegister
 import org.apache.spark.sql.types.StructType
 import org.geotools.gce.arcgrid.ArcGridWriter
 import org.geotools.gce.geotiff.GeoTiffWriter
@@ -42,7 +41,10 @@ private[spark] class RasterFileFormat extends FileFormat 
with DataSourceRegister
   override def inferSchema(
                             sparkSession: SparkSession,
                             options: Map[String, String],
-                            files: Seq[FileStatus]): Option[StructType] = None
+                            files: Seq[FileStatus]): Option[StructType] = {
+    throw new UnsupportedOperationException("Please use 'binaryFile' data 
source to read raster files")
+    None
+  }
 
   override def prepareWrite(
                              sparkSession: SparkSession,
@@ -65,17 +67,6 @@ private[spark] class RasterFileFormat extends FileFormat 
with DataSourceRegister
 
   override def shortName(): String = "raster"
 
-  override protected def buildReader(
-                                      sparkSession: SparkSession,
-                                      dataSchema: StructType,
-                                      partitionSchema: StructType,
-                                      requiredSchema: StructType,
-                                      filters: Seq[Filter],
-                                      options: Map[String, String],
-                                      hadoopConf: Configuration): 
(PartitionedFile) => Iterator[InternalRow] = {
-    throw new UnsupportedOperationException("Please use Binary data source to 
reading raster files")
-  }
-
   private def isValidRasterSchema(dataSchema: StructType): Boolean = {
     var imageColExist: Boolean = false
     val fields = dataSchema.fields

Reply via email to