alex-ninja commented on a change in pull request #1117:
URL: https://github.com/apache/cassandra/pull/1117#discussion_r701360798
##########
File path: src/java/org/apache/cassandra/db/virtual/AbstractWritableVirtualTable.java
##########
@@ -0,0 +1,287 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.db.virtual;
+
+import java.nio.ByteBuffer;
+import java.util.SortedMap;
+import javax.annotation.Nullable;
+
+import org.apache.cassandra.db.Clustering;
+import org.apache.cassandra.db.ClusteringBound;
+import org.apache.cassandra.db.ClusteringPrefix;
+import org.apache.cassandra.db.DecoratedKey;
+import org.apache.cassandra.db.Slice;
+import org.apache.cassandra.db.marshal.CompositeType;
+import org.apache.cassandra.db.partitions.PartitionUpdate;
+import org.apache.cassandra.db.rows.Cell;
+import org.apache.cassandra.exceptions.InvalidRequestException;
+import org.apache.cassandra.schema.TableMetadata;
+
+/**
+ * An abstract virtual table implementation that builds the result set on demand
+ * and allows fine-grained modification of the underlying source.
+ */
+public abstract class AbstractWritableVirtualTable extends AbstractVirtualTable
+{
+    protected AbstractWritableVirtualTable(TableMetadata metadata)
+    {
+        super(metadata);
+    }
+
+    @Override
+    public void apply(PartitionUpdate update)
+    {
+        DecoratedKey partitionKey = update.partitionKey();
+
+        if (update.deletionInfo().isLive())
+            update.forEach(row ->
+            {
+                Clustering<?> clusteringColumns = row.clustering();
+
+                if (row.deletion().isLive())
+                    row.forEach(columnData ->
+                    {
+                        if (columnData.column().isComplex())
+                            throw new InvalidRequestException("Complex type column deletes are not supported by table " + metadata);
+
+                        Cell<?> cell = (Cell<?>) columnData;
+
+                        if (cell.isTombstone())
+                            applyColumnDelete(partitionKey, clusteringColumns, cell);
+                        else
+                            applyColumnUpdate(partitionKey, clusteringColumns, cell);
+                    });
+                else
+                    applyRowDelete(partitionKey, clusteringColumns);
+            });
+        else
+        {
+            // MutableDeletionInfo may carry a partition deletion, a range tombstone list, or both
+            if (update.deletionInfo().hasRanges())
+                update.deletionInfo()
+                      .rangeIterator(false)
+                      .forEachRemaining(rt -> applyRangeTombstone(partitionKey, rt.deletedSlice()));
+
+            if (!update.deletionInfo().getPartitionDeletion().isLive())
+                applyPartitionDelete(partitionKey);
+        }
+    }
+
+    protected void applyPartitionDelete(DecoratedKey partitionKey)
+    {
+        throw new InvalidRequestException("Partition deletion is not supported by table " + metadata);
+    }
+
+    protected void applyRangeTombstone(DecoratedKey partitionKey, Slice slice)
+    {
+        throw new InvalidRequestException("Range deletion is not supported by table " + metadata);
+    }
+
+    protected void applyRowDelete(DecoratedKey partitionKey, ClusteringPrefix<?> clusteringColumns)
+    {
+        throw new InvalidRequestException("Row deletion is not supported by table " + metadata);
+    }
+
+    protected void applyColumnDelete(DecoratedKey partitionKey, ClusteringPrefix<?> clusteringColumns, Cell<?> cell)
+    {
+        throw new InvalidRequestException("Column deletion is not supported by table " + metadata);
+    }
+
+    protected abstract void applyColumnUpdate(DecoratedKey partitionKey, ClusteringPrefix<?> clusteringColumns, Cell<?> cell);
+
+    public abstract static class SimpleWritableVirtualTable extends AbstractWritableVirtualTable
+    {
+        protected SimpleWritableVirtualTable(TableMetadata metadata)
+        {
+            super(metadata);
+        }
+
+        @Override
+        protected void applyPartitionDelete(DecoratedKey partitionKey)
+        {
+            applyPartitionDelete(extractPartitionKeyColumnValues(partitionKey));
+        }
+
+        protected void applyPartitionDelete(Object[] partitionKeyColumnValues)
+        {
+            throw new InvalidRequestException("Partition deletion is not supported by table " + metadata);
+        }
+
+        @Override
+        protected void applyRangeTombstone(DecoratedKey partitionKey, Slice slice)
+        {
+            ClusteringBound<?> startClusteringColumns = slice.start();
+            Object[] startClusteringColumnValues = extractClusteringColumnValues(startClusteringColumns);
+
+            ClusteringBound<?> endClusteringColumns = slice.end();
+            Object[] endClusteringColumnValues = extractClusteringColumnValues(endClusteringColumns);
+
+            // The prefix holds the clustering columns bound by equality; a range comparison can
+            // apply only to the last restricted column. For example, with two clustering columns
+            // c1 and c2, the prefix holds the value of c1. With a single clustering column the
+            // prefix is empty.
+            int clusteringColumnsPrefixLength = Math.max(startClusteringColumnValues.length, endClusteringColumnValues.length) - 1;
+            Object[] clusteringColumnValuesPrefix = new Object[clusteringColumnsPrefixLength];
+            Object[] longerBoundValues = startClusteringColumnValues.length >= endClusteringColumnValues.length
+                    ? startClusteringColumnValues : endClusteringColumnValues;
+            System.arraycopy(longerBoundValues, 0, clusteringColumnValuesPrefix, 0, clusteringColumnsPrefixLength);
+
+            Object startClusteringColumnValue = startClusteringColumns.isBottom()
+                    ? null : startClusteringColumnValues[startClusteringColumnValues.length - 1];
+            boolean isStartClusteringColumnInclusive = startClusteringColumns.isInclusive();
+
+            Object endClusteringColumnValue = endClusteringColumns.isTop()
+                    ? null : endClusteringColumnValues[endClusteringColumnValues.length - 1];
+            boolean isEndClusteringColumnInclusive = endClusteringColumns.isInclusive();
+
+            applyRangeTombstone(extractPartitionKeyColumnValues(partitionKey),
+                                clusteringColumnValuesPrefix,
+                                startClusteringColumnValue,
+                                isStartClusteringColumnInclusive,
+                                endClusteringColumnValue,
+                                isEndClusteringColumnInclusive);

Review comment:
   I like the `Range` idea! Done.

   Regarding the prefix, the idea is this: all clustering columns except the last restricted one must have an equality condition, and a range comparison is not applicable to them. For example, `c1='c1_1' AND c2='c2_1' AND c3>'c3_1'` is valid, but it is impossible to specify `c1>='c1_1' AND c2='c2_1' AND c3>'c3_1'` or `c1='c1_1' AND c2<'c2_1' AND c3>'c3_1'`. So the prefix contains the values of all the "leading" clustering columns; for this example it would be `["c1_1", "c2_1"]`. This was mentioned in the comment next to the prefix calculation; I have now moved these details to the javadoc of `applyRangeTombstone`.
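   To make that concrete, here is an illustrative sketch (not part of the patch): assuming the `Object[]`-based `applyRangeTombstone` hook that the call above targets is overridable in a subclass, a delete such as `DELETE FROM vt WHERE pk='pk_1' AND c1='c1_1' AND c2='c2_1' AND c3>'c3_1'` should decompose into the arguments annotated below. The table name `vt` and the column names are invented for the example.

```java
// Illustrative only, not part of the patch. For a table with partition key pk and
// clustering columns c1, c2, c3, the statement
//   DELETE FROM vt WHERE pk = 'pk_1' AND c1 = 'c1_1' AND c2 = 'c2_1' AND c3 > 'c3_1'
// should arrive here decomposed as annotated below.
@Override
protected void applyRangeTombstone(Object[] partitionKeyColumnValues,        // ["pk_1"]
                                   Object[] clusteringColumnValuesPrefix,    // ["c1_1", "c2_1"], the equality-bound columns
                                   Object startClusteringColumnValue,        // "c3_1"
                                   boolean isStartClusteringColumnInclusive, // false, because of '>'
                                   Object endClusteringColumnValue,          // null, the range has no upper bound
                                   boolean isEndClusteringColumnInclusive)
{
    // A real implementation would remove the matching rows from its backing source here.
}
```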
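   Zooming out, a minimal subclass of `AbstractWritableVirtualTable` might look like the following sketch. Everything here is hypothetical (the table, its backing map, and the single-text-column layout are invented); it only assumes the `applyColumnUpdate` hook quoted above plus the existing `AbstractVirtualTable.data()`/`SimpleDataSet` machinery.

```java
package org.apache.cassandra.db.virtual;

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

import org.apache.cassandra.db.ClusteringPrefix;
import org.apache.cassandra.db.DecoratedKey;
import org.apache.cassandra.db.rows.Cell;
import org.apache.cassandra.schema.TableMetadata;

// Hypothetical example, not part of this PR: a settings-style table with a text
// partition key and a single text column "value", backed by an in-memory map.
public class ExampleSettingsTable extends AbstractWritableVirtualTable
{
    private final Map<String, String> backingStore = new ConcurrentHashMap<>();

    protected ExampleSettingsTable(TableMetadata metadata)
    {
        super(metadata);
    }

    @Override
    public DataSet data()
    {
        // Build the result set on demand from the backing source.
        SimpleDataSet result = new SimpleDataSet(metadata);
        backingStore.forEach((name, value) -> result.row(name).column("value", value));
        return result;
    }

    @Override
    protected void applyColumnUpdate(DecoratedKey partitionKey, ClusteringPrefix<?> clusteringColumns, Cell<?> cell)
    {
        // Decode the written cell and push it into the source. Deletes of any flavour
        // still hit the inherited defaults and are rejected with InvalidRequestException.
        String name = metadata.partitionKeyType.getString(partitionKey.getKey());
        String value = cell.column().type.getString(cell.buffer());
        backingStore.put(name, value);
    }
}
```

   Because none of the delete hooks are overridden, such a table accepts `INSERT`/`UPDATE` but keeps the fail-fast behaviour for every kind of deletion, which is exactly the granularity this class hierarchy is after.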
--
This is an automated message from the Apache Git Service. To respond to the message, please log on to GitHub and use the URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at: [email protected]

