This is an automated email from the ASF dual-hosted git repository.
dongjoon pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/spark.git
The following commit(s) were added to refs/heads/master by this push:
new 1ece299b15fd [SPARK-45902][SQL] Remove unused function `resolvePartitionColumns` from `DataSource`
1ece299b15fd is described below
commit 1ece299b15fd198a23d64743e152e61fd74750f5
Author: yangjie01 <[email protected]>
AuthorDate: Mon Nov 13 08:26:31 2023 -0800
[SPARK-45902][SQL] Remove unused function `resolvePartitionColumns` from `DataSource`
### What changes were proposed in this pull request?
`resolvePartitionColumns` was introduced by SPARK-37287 (https://github.com/apache/spark/pull/37099) and became unused after SPARK-41713 (https://github.com/apache/spark/pull/39220), so this PR removes it from `DataSource`.
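For context, the removed helper resolved each requested partition column name against the query's output columns via a `Resolver` (a name-equality function). A minimal, self-contained sketch of that pattern follows; the `ResolveSketch` object, `Column` type, and error message are illustrative stand-ins, not Spark's actual `Attribute`/`Resolver` internals:

```scala
// Sketch of the name-resolution pattern the removed helper implemented.
// `ResolveSketch` and `Column` are hypothetical stand-ins, not Spark types.
object ResolveSketch {
  type Resolver = (String, String) => Boolean

  // Case-insensitive name equality, analogous to Spark's default resolution.
  val caseInsensitiveResolution: Resolver = (a, b) => a.equalsIgnoreCase(b)

  case class Column(name: String)

  // Map each partition column name to a matching output column,
  // failing fast on any name that cannot be resolved.
  def resolvePartitionColumns(
      partitionNames: Seq[String],
      outputColumns: Seq[Column],
      resolver: Resolver): Seq[Column] =
    partitionNames.map { name =>
      outputColumns.find(c => resolver(c.name, name)).getOrElse {
        throw new IllegalArgumentException(
          s"Cannot resolve '$name' among: ${outputColumns.map(_.name).mkString(", ")}")
      }
    }

  def main(args: Array[String]): Unit = {
    val out = Seq(Column("id"), Column("DT"))
    // "dt" matches "DT" under case-insensitive resolution.
    println(resolvePartitionColumns(Seq("dt"), out, caseInsensitiveResolution))
  }
}
```

After SPARK-41713 the write path no longer calls this helper, which is why it can be removed without replacement.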
### Why are the changes needed?
Clean up unused code.
### Does this PR introduce _any_ user-facing change?
No
### How was this patch tested?
Pass GitHub Actions.
### Was this patch authored or co-authored using generative AI tooling?
No
Closes #43779 from LuciferYang/SPARK-45902.
Lead-authored-by: yangjie01 <[email protected]>
Co-authored-by: YangJie <[email protected]>
Signed-off-by: Dongjoon Hyun <[email protected]>
---
.../sql/execution/datasources/DataSource.scala | 25 +---------------------
1 file changed, 1 insertion(+), 24 deletions(-)
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DataSource.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DataSource.scala
index b3784dbf8137..cd295f3b17bd 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DataSource.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DataSource.scala
@@ -29,9 +29,8 @@ import org.apache.spark.SparkException
import org.apache.spark.deploy.SparkHadoopUtil
import org.apache.spark.internal.Logging
import org.apache.spark.sql._
-import org.apache.spark.sql.catalyst.analysis.{Resolver, UnresolvedAttribute}
+import org.apache.spark.sql.catalyst.analysis.UnresolvedAttribute
import org.apache.spark.sql.catalyst.catalog.{BucketSpec, CatalogStorageFormat, CatalogTable, CatalogUtils}
-import org.apache.spark.sql.catalyst.expressions.Attribute
import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
import org.apache.spark.sql.catalyst.util.{CaseInsensitiveMap, TypeUtils}
import org.apache.spark.sql.connector.catalog.TableProvider
@@ -822,26 +821,4 @@ object DataSource extends Logging {
throw QueryCompilationErrors.writeEmptySchemasUnsupportedByDataSourceError()
}
}
-
- /**
- * Resolve partition columns using output columns of the query plan.
- */
- def resolvePartitionColumns(
- partitionColumns: Seq[Attribute],
- outputColumns: Seq[Attribute],
- plan: LogicalPlan,
- resolver: Resolver): Seq[Attribute] = {
- partitionColumns.map { col =>
- // The partition columns created in `planForWritingFileFormat` should always be
- // `UnresolvedAttribute` with a single name part.
- assert(col.isInstanceOf[UnresolvedAttribute])
- val unresolved = col.asInstanceOf[UnresolvedAttribute]
- assert(unresolved.nameParts.length == 1)
- val name = unresolved.nameParts.head
- outputColumns.find(a => resolver(a.name, name)).getOrElse {
- throw QueryCompilationErrors.cannotResolveAttributeError(
- name, plan.output.map(_.name).mkString(", "))
- }
- }
- }
}
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]