This is an automated email from the ASF dual-hosted git repository.

danny0405 pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hudi.git


The following commit(s) were added to refs/heads/master by this push:
     new e8e6a250195 [HUDI-7054][FOLLOW_UP] HoodieCatalogTable should ignore lazily deleted partitions (#10024)
e8e6a250195 is described below

commit e8e6a250195a5640b07ed8636cba842c0b67dd05
Author: Rex(Hui) An <[email protected]>
AuthorDate: Fri Nov 10 22:02:06 2023 +0800

    [HUDI-7054][FOLLOW_UP] HoodieCatalogTable should ignore lazily deleted partitions (#10024)
---
 .../apache/spark/sql/catalyst/catalog/HoodieCatalogTable.scala | 10 ++++++++--
 .../sql/hudi/command/ShowHoodieTablePartitionsCommand.scala    |  3 +--
 2 files changed, 9 insertions(+), 4 deletions(-)

diff --git a/hudi-spark-datasource/hudi-spark-common/src/main/scala/org/apache/spark/sql/catalyst/catalog/HoodieCatalogTable.scala b/hudi-spark-datasource/hudi-spark-common/src/main/scala/org/apache/spark/sql/catalyst/catalog/HoodieCatalogTable.scala
index df47f63a8c0..b766e0d315e 100644
--- a/hudi-spark-datasource/hudi-spark-common/src/main/scala/org/apache/spark/sql/catalyst/catalog/HoodieCatalogTable.scala
+++ b/hudi-spark-datasource/hudi-spark-common/src/main/scala/org/apache/spark/sql/catalyst/catalog/HoodieCatalogTable.scala
@@ -23,6 +23,7 @@ import org.apache.hudi.avro.AvroSchemaUtils
 import org.apache.hudi.common.config.{DFSPropertiesConfiguration, TypedProperties}
 import org.apache.hudi.common.model.HoodieTableType
 import org.apache.hudi.common.table.HoodieTableConfig.URL_ENCODE_PARTITIONING
+import org.apache.hudi.common.table.timeline.TimelineUtils
 import org.apache.hudi.common.table.{HoodieTableConfig, HoodieTableMetaClient}
 import org.apache.hudi.common.util.StringUtils
 import org.apache.hudi.common.util.ValidationUtils.checkArgument
@@ -169,9 +170,14 @@ class HoodieCatalogTable(val spark: SparkSession, var table: CatalogTable) exten
   lazy val partitionSchema: StructType = StructType(tableSchema.filter(f => partitionFields.contains(f.name)))
 
   /**
-   * All the partition paths
+   * All the partition paths, excludes lazily deleted partitions.
    */
-  def getPartitionPaths: Seq[String] = getAllPartitionPaths(spark, table)
+  def getPartitionPaths: Seq[String] = {
+    val droppedPartitions = TimelineUtils.getDroppedPartitions(metaClient.getActiveTimeline)
+
+    getAllPartitionPaths(spark, table)
+      .filter(!droppedPartitions.contains(_))
+  }
 
   /**
    * Check if table is a partitioned table
diff --git a/hudi-spark-datasource/hudi-spark-common/src/main/scala/org/apache/spark/sql/hudi/command/ShowHoodieTablePartitionsCommand.scala b/hudi-spark-datasource/hudi-spark-common/src/main/scala/org/apache/spark/sql/hudi/command/ShowHoodieTablePartitionsCommand.scala
index a2a35e35ec8..f43b10fcafc 100644
--- a/hudi-spark-datasource/hudi-spark-common/src/main/scala/org/apache/spark/sql/hudi/command/ShowHoodieTablePartitionsCommand.scala
+++ b/hudi-spark-datasource/hudi-spark-common/src/main/scala/org/apache/spark/sql/hudi/command/ShowHoodieTablePartitionsCommand.scala
@@ -17,8 +17,8 @@
 
 package org.apache.spark.sql.hudi.command
 
-import org.apache.hudi.common.table.timeline.TimelineUtils
 import org.apache.hudi.common.util.PartitionPathEncodeUtils
+
 import org.apache.spark.sql.{Row, SparkSession}
 import org.apache.spark.sql.catalyst.TableIdentifier
 import org.apache.spark.sql.catalyst.catalog.CatalogTypes.TablePartitionSpec
@@ -55,7 +55,6 @@ case class ShowHoodieTablePartitionsCommand(
         }
       }
         .getOrElse(hoodieCatalogTable.getPartitionPaths)
-        .filter(!TimelineUtils.getDroppedPartitions(hoodieCatalogTable.metaClient.getActiveTimeline).contains(_))
         .map(Row(_))
     } else {
       Seq.empty[Row]

Reply via email to