YannByron commented on code in PR #2314:
URL: https://github.com/apache/fluss/pull/2314#discussion_r2681034709
##########
fluss-spark/fluss-spark-common/src/main/scala/org/apache/fluss/spark/catalog/SupportsFlussPartitionManagement.scala:
##########
@@ -48,6 +59,69 @@ trait SupportsFlussPartitionManagement extends
AbstractSparkTable with SupportsP
override def listPartitionIdentifiers(
names: Array[String],
ident: InternalRow): Array[InternalRow] = {
- throw new UnsupportedOperationException("Listing partition is not
supported")
+ assert(
+ names.length == ident.numFields,
+ s"Number of partition names (${names.length}) must be equal to " +
+ s"the number of partition values (${ident.numFields})."
+ )
+ val schema = partitionSchema()
+ assert(
+ names.forall(fieldName => schema.fieldNames.contains(fieldName)),
+ s"Some partition names ${names.mkString("[", ", ", "]")} don't belong to
" +
+ s"the partition schema '${schema.sql}'."
+ )
+
+ val flussPartitionRows = admin
+ .listPartitionInfos(tableInfo.getTablePath)
+ .get()
+ .asScala
+ .map(p => toInternalRow(p.getPartitionSpec, schema))
+
+ val indexes = names.map(schema.fieldIndex)
+ val dataTypes = names.map(schema(_).dataType)
+ val currentRow = new GenericInternalRow(new Array[Any](names.length))
+ flussPartitionRows.filter {
+ partRow =>
+ for (i <- names.indices) {
+ currentRow.values(i) = partRow.get(indexes(i), dataTypes(i))
+ }
+ currentRow == ident
+ }.toArray
+ }
+}
+
+object SupportsFlussPartitionManagement {
+ private def toInternalRow(
+ partitionSpec: PartitionSpec,
+ partitionSchema: StructType): InternalRow = {
+ val row = new SpecificInternalRow(partitionSchema)
Review Comment:
Let's keep it here temporarily. I will move this to another place in the `to
support batch read` PR later.
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]