aokolnychyi commented on a change in pull request #35395:
URL: https://github.com/apache/spark/pull/35395#discussion_r823359726
##########
File path: sql/catalyst/src/test/scala/org/apache/spark/sql/connector/catalog/InMemoryRowLevelOperationTable.scala
##########
@@ -0,0 +1,96 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.spark.sql.connector.catalog
+
+import java.util
+
+import org.apache.spark.sql.connector.distributions.{Distribution, Distributions}
+import org.apache.spark.sql.connector.expressions.{FieldReference, LogicalExpressions, NamedReference, SortDirection, SortOrder, Transform}
+import org.apache.spark.sql.connector.read.{Scan, ScanBuilder}
+import org.apache.spark.sql.connector.write.{BatchWrite, LogicalWriteInfo, RequiresDistributionAndOrdering, RowLevelOperation, RowLevelOperationBuilder, RowLevelOperationInfo, Write, WriteBuilder, WriterCommitMessage}
+import org.apache.spark.sql.connector.write.RowLevelOperation.Command
+import org.apache.spark.sql.types.StructType
+import org.apache.spark.sql.util.CaseInsensitiveStringMap
+
+class InMemoryRowLevelOperationTable(
+    name: String,
+    schema: StructType,
+    partitioning: Array[Transform],
+    properties: util.Map[String, String])
+  extends InMemoryTable(name, schema, partitioning, properties) with SupportsRowLevelOperations {
+
+  override def newRowLevelOperationBuilder(
+      info: RowLevelOperationInfo): RowLevelOperationBuilder = {
+    () => PartitionBasedOperation(info.command)
+  }
+
+  case class PartitionBasedOperation(command: Command) extends RowLevelOperation {
+    private final val PARTITION_COLUMN_REF = FieldReference(PartitionKeyColumn.name)
+
+    var configuredScan: InMemoryBatchScan = _
+
+    override def requiredMetadataAttributes(): Array[NamedReference] = {
+      Array(PARTITION_COLUMN_REF)
+    }
+
+    override def newScanBuilder(options: CaseInsensitiveStringMap): ScanBuilder = {
+      new InMemoryScanBuilder(schema) {
+        override def build: Scan = {
+          val scan = super.build()
+          configuredScan = scan.asInstanceOf[InMemoryBatchScan]
+          scan
+        }
+      }
+    }
+
+    override def newWriteBuilder(info: LogicalWriteInfo): WriteBuilder = new WriteBuilder {
+
+      override def build(): Write = new Write with RequiresDistributionAndOrdering {
+        override def requiredDistribution(): Distribution = {
+          Distributions.clustered(Array(PARTITION_COLUMN_REF))
+        }
+
+        override def requiredOrdering(): Array[SortOrder] = {
+          Array[SortOrder](
+            LogicalExpressions.sort(
+              PARTITION_COLUMN_REF,
+              SortDirection.ASCENDING,
+              SortDirection.ASCENDING.defaultNullOrdering())
+          )
+        }
+
+        override def toBatch: BatchWrite = PartitionBasedReplaceData(configuredScan)
+
+        override def description(): String = "InMemoryWrite"
+      }
+    }
+
+    override def description(): String = "InMemoryPartitionReplaceOperation"
+  }
+
+  private case class PartitionBasedReplaceData(scan: InMemoryBatchScan) extends TestBatchWrite {
+
+    override def commit(messages: Array[WriterCommitMessage]): Unit = dataMap.synchronized {
+      val newData = messages.map(_.asInstanceOf[BufferedRows])
+      val readRows = scan.data.flatMap(_.asInstanceOf[BufferedRows].rows)

Review comment:

The current proposal is to leverage the existing `SupportsRuntimeFiltering` interface. Data sources that support row-level operations will need to implement `RowLevelOperation`, added in this PR. That interface is used to instantiate a scan builder, so data sources can provide a special scan for row-level operations. Runtime filtering in DS V2 is very flexible and can support metadata columns. In the case of Delta, it should be sufficient to have a scan that reports `_file_name` as a filtering attribute; Spark will then execute a subquery and pass the unique file names back into the scan. Since the write has access to the scan, it also knows the set of files that was scanned and needs to be replaced with new files. I was trying to keep the row-level API as simple as possible and rely on existing concepts. This approach builds on top of the metadata columns and runtime filtering we already support.
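
To make the Delta scenario concrete, here is a rough, hypothetical sketch (not part of this PR) of a file-based scan participating in that handshake. The class name `DeltaLikeRowLevelScan` and the `dataFiles`/`scannedFiles` members are invented for illustration; only `SupportsRuntimeFiltering`, `Filter`/`In`, and `FieldReference` are existing Spark DS V2 APIs.

```scala
import org.apache.spark.sql.connector.expressions.{FieldReference, NamedReference}
import org.apache.spark.sql.connector.read.{Batch, SupportsRuntimeFiltering}
import org.apache.spark.sql.sources.{Filter, In}
import org.apache.spark.sql.types.StructType

// Hypothetical scan returned by a row-level operation of a file-based source.
class DeltaLikeRowLevelScan(
    schema: StructType,
    private var dataFiles: Seq[String]) extends SupportsRuntimeFiltering {

  override def readSchema(): StructType = schema

  // Advertise the metadata column Spark may build a runtime filter on.
  override def filterAttributes(): Array[NamedReference] = {
    Array(FieldReference("_file_name"))
  }

  // Spark evaluates a subquery over the matching rows and passes the distinct
  // file names back here, so the scan keeps only the affected files.
  override def filter(filters: Array[Filter]): Unit = {
    filters.foreach {
      case In("_file_name", values) =>
        val matching = values.map(_.toString).toSet
        dataFiles = dataFiles.filter(matching.contains)
      case _ => // ignore filters this scan cannot use for pruning
    }
  }

  // The write that shares this scan instance can ask which files were read
  // and therefore which files the row-level operation has to replace.
  def scannedFiles: Seq[String] = dataFiles

  override def toBatch: Batch = {
    // A real connector would plan one input partition per remaining file;
    // omitted here because only the runtime-filtering handshake is shown.
    throw new UnsupportedOperationException("illustrative sketch only")
  }
}
```

This mirrors what the in-memory test table above does with `configuredScan`: the operation hands the same scan instance to the write, so `commit` knows exactly which groups (partitions there, files here) need to be replaced with new data.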
