aweisberg commented on code in PR #3844:
URL: https://github.com/apache/cassandra/pull/3844#discussion_r1937945414


##########
test/harry/main/org/apache/cassandra/harry/model/ASTSingleTableModel.java:
##########
@@ -426,13 +428,20 @@ public List<BytesPartitionState> getByToken(Token token)
     public void validate(ByteBuffer[][] actual, Select select)
     {
         SelectResult results = getRowsAsByteBuffer(select);
-        if (results.unordered)
+        try
         {
-            validateAnyOrder(factory.selectionOrder, toRow(factory.selectionOrder, actual), toRow(factory.selectionOrder, results.rows));
+            if (results.unordered)
+            {
+                validateAnyOrder(factory.selectionOrder, toRow(factory.selectionOrder, actual), toRow(factory.selectionOrder, results.rows));
+            }
+            else
+            {
+                validate(actual, results.rows);
+            }
         }
-        else
+        catch (AssertionError e)
         {
-            validate(actual, results.rows);
+            throw new AssertionError("Unexpected results for query: " + StringUtils.escapeControlChars(select.visit(StandardVisitors.DEBUG).toCQL()), e);

Review Comment:
   Should this use the visitor in `StatefulASTBase`?
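   Something like the following is what I had in mind (a rough sketch only; I'm assuming `StatefulASTBase` exposes its debug visitor as a shared constant, and `DEBUG_VISITOR` below is just a placeholder name for whatever it is actually called):
   ```java
   // sketch: reuse the shared debug visitor rather than referencing StandardVisitors.DEBUG here
   throw new AssertionError("Unexpected results for query: "
                            + StringUtils.escapeControlChars(select.visit(StatefulASTBase.DEBUG_VISITOR).toCQL()),
                            e);
   ```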



##########
test/distributed/org/apache/cassandra/distributed/test/cql3/RepoTest.java:
##########
@@ -0,0 +1,169 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.distributed.test.cql3;
+
+import java.io.File;
+import java.io.IOException;
+import java.nio.charset.StandardCharsets;
+import java.nio.file.Files;
+import java.util.function.BiConsumer;
+import java.util.regex.Pattern;
+
+import com.google.common.base.Splitter;
+import org.junit.Ignore;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.datastax.driver.core.SimpleStatement;
+import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.distributed.Cluster;
+import org.apache.cassandra.distributed.api.ConsistencyLevel;
+import org.apache.cassandra.distributed.api.Feature;
+import org.apache.cassandra.distributed.test.JavaDriverUtils;
+import org.apache.cassandra.distributed.test.TestBaseImpl;
+import org.assertj.core.api.Assertions;
+
+/**
+ * This test exists to help isolate issues with {@link SingleNodeTableWalkTest} and related classes.
+ */
+@Ignore
+public class RepoTest extends TestBaseImpl

Review Comment:
   I don't see this on `cep-15-accord`; is it temporary?



##########
test/distributed/org/apache/cassandra/distributed/test/cql3/SingleNodeTableWalkTest.java:
##########
@@ -534,7 +534,7 @@ public boolean allowNonPartitionMultiColumnQuery()
 
         private List<Symbol> multiColumnQueryColumns()
         {
-            List<Symbol> allowedColumns = model.factory.selectionOrder;
+            List<Symbol> allowedColumns = searchableColumns;

Review Comment:
   I don't quite understand why the number of columns in the partition key determines whether `selectionOrder` or `nonPartitionColumns` is used. `selectionOrder` is all columns, and `nonPartitionColumns` is, I assume, the non-partition-key columns.
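   To make my mental model concrete (a sketch only; the example table below is made up, and the list contents are my assumption rather than something I verified against the code):
   ```java
   // hypothetical table: PRIMARY KEY ((pk1, pk2), ck1), static s1, regular v1
   // selectionOrder      -> [pk1, pk2, ck1, s1, v1]  // all columns, in selection order (my assumption)
   // nonPartitionColumns -> [ck1, s1, v1]            // everything except the partition key (my assumption)
   List<Symbol> allowedColumns = searchableColumns;   // the new line; unclear why partition-key column count should pick between the two lists
   ```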



##########
test/harry/main/org/apache/cassandra/harry/model/ASTSingleTableModel.java:
##########
@@ -0,0 +1,1399 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.harry.model;
+
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.NavigableSet;
+import java.util.Objects;
+import java.util.Set;
+import java.util.TreeMap;
+import java.util.TreeSet;
+import java.util.function.IntFunction;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+import javax.annotation.Nullable;
+
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Iterables;
+import com.google.common.collect.Maps;
+import com.google.common.collect.Sets;
+
+import accord.utils.Invariants;
+import org.apache.cassandra.cql3.ast.Conditional;
+import org.apache.cassandra.cql3.ast.Conditional.Where.Inequality;
+import org.apache.cassandra.cql3.ast.Element;
+import org.apache.cassandra.cql3.ast.Expression;
+import org.apache.cassandra.cql3.ast.ExpressionEvaluator;
+import org.apache.cassandra.cql3.ast.FunctionCall;
+import org.apache.cassandra.cql3.ast.Mutation;
+import org.apache.cassandra.cql3.ast.Select;
+import org.apache.cassandra.cql3.ast.StandardVisitors;
+import org.apache.cassandra.cql3.ast.Symbol;
+import org.apache.cassandra.db.BufferClustering;
+import org.apache.cassandra.db.Clustering;
+import org.apache.cassandra.db.marshal.AbstractType;
+import org.apache.cassandra.dht.Token;
+import org.apache.cassandra.harry.model.BytesPartitionState.PrimaryKey;
+import org.apache.cassandra.harry.util.StringUtils;
+import org.apache.cassandra.schema.TableMetadata;
+import org.apache.cassandra.tools.nodetool.formatter.TableBuilder;
+import org.apache.cassandra.utils.ByteBufferUtil;
+import org.apache.cassandra.utils.ImmutableUniqueList;
+import org.apache.cassandra.utils.Pair;
+
+import static org.apache.cassandra.harry.model.BytesPartitionState.asCQL;
+
+public class ASTSingleTableModel
+{
+    public final BytesPartitionState.Factory factory;
+    private final TreeMap<BytesPartitionState.Ref, BytesPartitionState> partitions = new TreeMap<>();
+
+    public ASTSingleTableModel(TableMetadata metadata)
+    {
+        this.factory = new BytesPartitionState.Factory(metadata);
+    }
+
+    public NavigableSet<BytesPartitionState.Ref> partitionKeys()
+    {
+        return partitions.navigableKeySet();
+    }
+
+    public int size()
+    {
+        return partitions.size();
+    }
+
+    public boolean isEmpty()
+    {
+        return partitions.isEmpty();
+    }
+
+    public TreeMap<ByteBuffer, List<PrimaryKey>> index(BytesPartitionState.Ref ref, Symbol symbol)
+    {
+        if (factory.partitionColumns.contains(symbol))
+            throw new AssertionError("When indexing based off a single 
partition, unable to index partition columns; given " + symbol.detailedName());
+        BytesPartitionState partition = get(ref);
+        Invariants.nonNull(partition, "Unable to index %s; null partition %s", 
symbol, ref);
+        TreeMap<ByteBuffer, List<PrimaryKey>> index = new 
TreeMap<>(symbol.type()::compare);
+        if (factory.staticColumns.contains(symbol))
+            return indexStaticColumn(index, symbol, partition);
+        return indexRowColumn(index, symbol, partition);
+    }
+
+    public TreeMap<ByteBuffer, List<PrimaryKey>> index(Symbol symbol)
+    {
+        TreeMap<ByteBuffer, List<PrimaryKey>> index = new TreeMap<>(symbol.type()::compare);
+        if (factory.partitionColumns.contains(symbol))
+            return indexPartitionColumn(index, symbol);
+        if (factory.staticColumns.contains(symbol))
+            return indexStaticColumn(index, symbol);
+        return indexRowColumn(index, symbol);
+    }
+
+    private TreeMap<ByteBuffer, List<PrimaryKey>> indexPartitionColumn(TreeMap<ByteBuffer, List<PrimaryKey>> index, Symbol symbol)
+    {
+        int offset = factory.partitionColumns.indexOf(symbol);
+        for (BytesPartitionState partition : partitions.values())
+        {
+            if (partition.isEmpty()) continue;
+            ByteBuffer bb = partition.key.bufferAt(offset);
+            List<PrimaryKey> list = index.computeIfAbsent(bb, i -> new ArrayList<>());
+            for (BytesPartitionState.Row row : partition.rows())
+                list.add(row.ref());
+        }
+        return index;
+    }
+
+    private TreeMap<ByteBuffer, List<PrimaryKey>> indexStaticColumn(TreeMap<ByteBuffer, List<PrimaryKey>> index, Symbol symbol)
+    {
+        for (BytesPartitionState partition : partitions.values())
+            indexStaticColumn(index, symbol, partition);
+        return index;
+    }
+
+    private TreeMap<ByteBuffer, List<PrimaryKey>> indexStaticColumn(TreeMap<ByteBuffer, List<PrimaryKey>> index, Symbol symbol, BytesPartitionState partition)
+    {
+        if (partition.isEmpty()) return index;
+        ByteBuffer bb = partition.staticRow().get(symbol);
+        if (bb == null)
+            return index;
+        List<PrimaryKey> list = index.computeIfAbsent(bb, i -> new ArrayList<>());
+        for (BytesPartitionState.Row row : partition.rows())
+            list.add(row.ref());
+        return index;
+    }
+
+    private TreeMap<ByteBuffer, List<PrimaryKey>> indexRowColumn(TreeMap<ByteBuffer, List<PrimaryKey>> index, Symbol symbol)
+    {
+        boolean clustering = factory.clusteringColumns.contains(symbol);
+        int offset = clustering ? factory.clusteringColumns.indexOf(symbol) : factory.regularColumns.indexOf(symbol);
+        for (BytesPartitionState partition : partitions.values())
+            indexRowColumn(index, clustering, offset, partition);
+        return index;
+    }
+
+    private TreeMap<ByteBuffer, List<PrimaryKey>> indexRowColumn(TreeMap<ByteBuffer, List<PrimaryKey>> index, Symbol symbol, BytesPartitionState partition)
+    {
+        boolean clustering = factory.clusteringColumns.contains(symbol);
+        int offset = clustering ? factory.clusteringColumns.indexOf(symbol) : factory.regularColumns.indexOf(symbol);
+        indexRowColumn(index, clustering, offset, partition);
+        return index;
+    }
+
+    private void indexRowColumn(TreeMap<ByteBuffer, List<PrimaryKey>> index, boolean clustering, int offset, BytesPartitionState partition)
+    {
+        if (partition.isEmpty()) return;
+        for (BytesPartitionState.Row row : partition.rows())
+        {
+            ByteBuffer bb = clustering ? row.clustering.bufferAt(offset) : row.get(offset);
+            if (bb == null)
+                continue;
+            index.computeIfAbsent(bb, i -> new ArrayList<>()).add(row.ref());
+        }
+    }
+
+    public void update(Mutation mutation)
+    {
+        switch (mutation.kind)
+        {
+            case INSERT:
+                update((Mutation.Insert) mutation);
+                break;
+            case UPDATE:
+                update((Mutation.Update) mutation);
+                break;
+            case DELETE:
+                update((Mutation.Delete) mutation);
+                break;
+            default:
+                throw new UnsupportedOperationException(mutation.kind.name());
+        }
+    }
+
+    public void update(Mutation.Insert insert)
+    {
+        Clustering<ByteBuffer> pd = pd(insert);
+        BytesPartitionState partition = partitions.get(factory.createRef(pd));
+        if (partition == null)
+        {
+            partition = factory.create(pd);
+            partitions.put(partition.ref(), partition);
+        }
+        Map<Symbol, Expression> values = insert.values;
+        if (!factory.staticColumns.isEmpty() && !Sets.intersection(factory.staticColumns.asSet(), values.keySet()).isEmpty())
+        {
+            // static columns to add in.  If we are doing something like += to a row that doesn't exist, we still update statics...
+            Map<Symbol, ByteBuffer> write = new HashMap<>();
+            for (Symbol col : Sets.intersection(factory.staticColumns.asSet(), values.keySet()))
+                write.put(col, eval(values.get(col)));
+            partition.setStaticColumns(write);
+        }
+        Map<Symbol, ByteBuffer> write = new HashMap<>();
+        for (Symbol col : Sets.intersection(factory.regularColumns.asSet(), values.keySet()))
+            write.put(col, eval(values.get(col)));
+        partition.setColumns(key(insert.values, factory.clusteringColumns),
+                             write,
+                             true);
+    }
+
+    public void update(Mutation.Update update)
+    {
+        var split = splitOnPartition(update.where.simplify());
+        List<Clustering<ByteBuffer>> pks = split.left;
+        List<Conditional> remaining = split.right;
+        for (Clustering<ByteBuffer> pd : pks)
+        {
+            BytesPartitionState partition = partitions.get(factory.createRef(pd));
+            if (partition == null)
+            {
+                partition = factory.create(pd);
+                partitions.put(partition.ref(), partition);
+            }
+            Map<Symbol, Expression> set = update.set;
+            if (!factory.staticColumns.isEmpty() && !Sets.intersection(factory.staticColumns.asSet(), set.keySet()).isEmpty())
+            {
+                // static columns to add in.  If we are doing something like += to a row that doesn't exist, we still update statics...
+                Map<Symbol, ByteBuffer> write = new HashMap<>();
+                for (Symbol col : Sets.intersection(factory.staticColumns.asSet(), set.keySet()))
+                    write.put(col, eval(set.get(col)));
+                partition.setStaticColumns(write);
+            }
+            for (Clustering<ByteBuffer> cd : clustering(remaining))
+            {
+                Map<Symbol, ByteBuffer> write = new HashMap<>();
+                for (Symbol col : Sets.intersection(factory.regularColumns.asSet(), set.keySet()))
+                    write.put(col, eval(set.get(col)));
+
+                partition.setColumns(cd, write, false);
+            }
+        }
+    }
+
+    private enum DeleteKind
+    {PARTITION, ROW, COLUMN}
+
+    public void update(Mutation.Delete delete)
+    {
+        //TODO (coverage): range deletes
+        var split = splitOnPartition(delete.where.simplify());
+        List<Clustering<ByteBuffer>> pks = split.left;
+        List<Clustering<ByteBuffer>> clusterings = split.right.isEmpty() ? Collections.emptyList() : clustering(split.right);
+        HashSet<Symbol> columns = delete.columns.isEmpty() ? null : new HashSet<>(delete.columns);
+        for (Clustering<ByteBuffer> pd : pks)
+        {
+            BytesPartitionState partition = partitions.get(factory.createRef(pd));
+            if (partition == null) return; // can't delete a partition that doesn't exist...
+
+            DeleteKind kind = DeleteKind.PARTITION;
+            if (!delete.columns.isEmpty())
+                kind = DeleteKind.COLUMN;
+            else if (!clusterings.isEmpty())
+                kind = DeleteKind.ROW;
+
+            switch (kind)
+            {
+                case PARTITION:
+                    partitions.remove(partition.ref());
+                    break;
+                case ROW:
+                    for (Clustering<ByteBuffer> cd : clusterings)
+                    {
+                        partition.deleteRow(cd);
+                        if (partition.shouldDelete())
+                            partitions.remove(partition.ref());
+                    }
+                    break;
+                case COLUMN:
+                    if (clusterings.isEmpty())
+                    {
+                        partition.deleteStaticColumns(columns);
+                    }
+                    else
+                    {
+                        for (Clustering<ByteBuffer> cd : clusterings)
+                        {
+                            partition.deleteColumns(cd, columns);
+                            if (partition.shouldDelete())
+                                partitions.remove(partition.ref());
+                        }
+                    }
+                    break;
+//                case SLICE:
+//                case RANGE:
+                default:
+                    throw new UnsupportedOperationException();
+            }
+        }
+    }
+
+    private List<Clustering<ByteBuffer>> clustering(List<Conditional> conditionals)
+    {
+        if (conditionals.isEmpty())
+        {
+            if (factory.clusteringColumns.isEmpty()) return Collections.singletonList(Clustering.EMPTY);
+            throw new IllegalArgumentException("No clustering columns defined in the WHERE clause, but clustering columns exist; expected " + factory.clusteringColumns);
+        }
+        var split = splitOnClustering(conditionals);
+        var clusterings = split.left;
+        var remaining = split.right;
+        if (!remaining.isEmpty())
+            throw new IllegalArgumentException("Non Partition/Clustering 
columns found in WHERE clause; " + 
remaining.stream().map(Element::toCQL).collect(Collectors.joining(", ")));
+        return clusterings;
+    }
+
+    private Pair<List<Clustering<ByteBuffer>>, List<Conditional>> splitOnPartition(List<Conditional> conditionals)
+    {
+        return splitOn(factory.partitionColumns.asSet(), conditionals);
+    }
+
+    private Pair<List<Clustering<ByteBuffer>>, List<Conditional>> splitOnClustering(List<Conditional> conditionals)
+    {
+        return splitOn(factory.clusteringColumns.asSet(), conditionals);
+    }
+
+    private Pair<List<Clustering<ByteBuffer>>, List<Conditional>> splitOn(ImmutableUniqueList<Symbol>.AsSet columns, List<Conditional> conditionals)
+    {
+        // pk requires equality
+        Map<Symbol, Set<ByteBuffer>> pks = new HashMap<>();
+        List<Conditional> other = new ArrayList<>();
+        for (Conditional c : conditionals)
+        {
+            if (c instanceof Conditional.Where)
+            {
+                Conditional.Where w = (Conditional.Where) c;
+                if (w.kind == Inequality.EQUAL && columns.contains(w.lhs))
+                {
+                    Symbol col = (Symbol) w.lhs;
+                    ByteBuffer bb = eval(w.rhs);
+                    if (pks.containsKey(col))
+                        throw new IllegalArgumentException("Partition column " 
+ col + " was defined multiple times in the WHERE clause");
+                    pks.put(col, Collections.singleton(bb));
+                }
+                else
+                {
+                    other.add(c);
+                }
+            }
+            else if (c instanceof Conditional.In)
+            {
+                Conditional.In i = (Conditional.In) c;
+                if (columns.contains(i.ref))
+                {
+                    Symbol col = (Symbol) i.ref;
+                    if (pks.containsKey(col))
+                        throw new IllegalArgumentException("Partition column " 
+ col + " was defined multiple times in the WHERE clause");
+                    var set = 
i.expressions.stream().map(ASTSingleTableModel::eval).collect(Collectors.toSet());
+                    pks.put(col, set);
+                }
+                else
+                {
+                    other.add(c);
+                }
+            }
+            else
+            {
+                other.add(c);
+            }
+        }
+        if (!columns.equals(pks.keySet()))
+        {
+            var missing = Sets.difference(columns, pks.keySet());
+            throw new AssertionError("Unable to find expected columns " + 
missing);
+        }
+
+        List<Clustering<ByteBuffer>> partitionKeys = keys(columns, pks);
+        return Pair.create(partitionKeys, other);
+    }
+
+    private List<Clustering<ByteBuffer>> keys(Collection<Symbol> columns, Map<Symbol, Set<ByteBuffer>> pks)
+    {
+        //TODO (coverage): handle IN
+        ByteBuffer[] bbs = new ByteBuffer[columns.size()];
+        int idx = 0;
+        for (Symbol s : columns)
+        {
+            Set<ByteBuffer> values = pks.get(s);
+            if (values.size() > 1)
+                throw new UnsupportedOperationException("IN clause is 
currently unsupported... its on the backlog!");
+            bbs[idx++] = Iterables.getFirst(values, null);
+        }
+        return Collections.singletonList(BufferClustering.make(bbs));
+    }
+
+    private Clustering<ByteBuffer> pd(Mutation.Insert mutation)
+    {
+        return key(mutation.values, factory.partitionColumns);
+    }
+
+    public BytesPartitionState get(BytesPartitionState.Ref ref)
+    {
+        return partitions.get(ref);
+    }
+
+    public List<BytesPartitionState> getByToken(Token token)
+    {
+        NavigableSet<BytesPartitionState.Ref> keys = partitions.navigableKeySet();
+        // To support the case where 2+ keys share the same token, need to create a token ref before and after the token, to make sure
+        // the head/tail sets find the matches correctly
+        NavigableSet<BytesPartitionState.Ref> matches = keys.headSet(factory.createRef(token, true), true)
+                                                            .tailSet(factory.createRef(token, false), true);
+        if (matches.isEmpty()) return Collections.emptyList();
+        return matches.stream().map(partitions::get).collect(Collectors.toList());
+    }
+
+    public void validate(ByteBuffer[][] actual, Select select)
+    {
+        SelectResult results = getRowsAsByteBuffer(select);
+        try
+        {
+            if (results.unordered)
+            {
+                validateAnyOrder(factory.selectionOrder, toRow(factory.selectionOrder, actual), toRow(factory.selectionOrder, results.rows));
+            }
+            else
+            {
+                validate(actual, results.rows);
+            }
+        }
+        catch (AssertionError e)
+        {
+            AssertionError error = new AssertionError("Unexpected results for 
query: " + 
StringUtils.escapeControlChars(select.visit(StandardVisitors.DEBUG).toCQL()), 
e);
+            // This stack trace is not helpful, this error message is trying 
to improve the error returned to know what query failed, so the stack trace 
only adds noise
+            error.setStackTrace(new StackTraceElement[0]);
+            throw error;
+        }
+    }
+
+    public void validate(ByteBuffer[][] actual, ByteBuffer[][] expected)
+    {
+        validate(factory.selectionOrder, actual, expected);
+    }
+
+    private static void validate(ImmutableUniqueList<Symbol> columns, ByteBuffer[][] actual, ByteBuffer[][] expected)
+    {
+        // check any order
+        validateAnyOrder(columns, toRow(columns, actual), toRow(columns, expected));
+        // order matched, but are there duplicates?
+        validateNoDuplicates(columns, actual, expected);
+        // all rows match, and there are no duplicates... but are they in the right order?
+        validateOrder(columns, actual, expected);
+    }
+
+    private static void validateAnyOrder(ImmutableUniqueList<Symbol> columns, Set<Row> actual, Set<Row> expected)
+    {
+        var unexpected = Sets.difference(actual, expected);
+        var missing = Sets.difference(expected, actual);
+        StringBuilder sb = null;
+        if (!unexpected.isEmpty())
+        {
+            sb = new StringBuilder();
+            sb.append("Unexpected rows found:\n").append(table(columns, 
unexpected));
+        }
+
+        if (!missing.isEmpty())
+        {
+            if (sb == null)
+            {
+                sb = new StringBuilder();
+            }
+            else
+            {
+                sb.append('\n');
+            }
+            if (actual.isEmpty()) sb.append("No rows returned");
+            else sb.append("Missing rows:\n").append(table(columns, missing));
+        }
+        if (sb != null)
+        {
+            sb.append("\nExpected:\n").append(table(columns, expected));
+            throw new AssertionError(sb.toString());
+        }
+    }
+
+    private static void validateNoDuplicates(ImmutableUniqueList<Symbol> columns, ByteBuffer[][] actual, ByteBuffer[][] expected)
+    {
+        // validateAnyOrder was run first, which made sure that all rows match, but that used sets which avoids duplicates
+        // this means that duplicates can only happen if-and-only-if the lengths do not match...
+        //TODO (correctness): what edge cases actually allow duplicates?  aggregates would make sense...
+        if (actual.length == expected.length) return;
+        StringBuilder sb = null;
+        if (actual.length > expected.length)
+        {
+            // the response had a duplicate
+            Set<Row> set = new HashSet<>();
+            int rowId = 0;
+            for (ByteBuffer[] bbs : actual)
+            {
+                Row row = new Row(columns, bbs);
+                if (!set.add(row))
+                {
+                    if (sb == null)
+                        sb = new StringBuilder();
+                    sb.append("Duplicate row in response at row 
").append(rowId).append(": ").append(row).append('\n');
+                }
+                rowId++;
+            }
+        }
+        else if (expected.length > actual.length)
+        {
+            //TODO (correctness): the model expected a duplicate, but was not found in the response
+        }
+        if (sb != null)
+        {
+            sb.append("\nExpected:\n").append(table(columns, expected));
+            sb.append("\nActual:\n").append(table(columns, actual));
+            throw new AssertionError(sb.toString());
+        }
+    }
+
+    private static void validateOrder(ImmutableUniqueList<Symbol> columns, ByteBuffer[][] actual, ByteBuffer[][] expected)
+    {
+        StringBuilder sb = null;
+        for (int i = 0, size = Math.min(actual.length, expected.length); i < size; i++)
+        {
+            ByteBuffer[] as = actual[i];
+            ByteBuffer[] es = expected[i];
+            if (as.length != es.length)
+            {
+                if (sb == null)
+                    sb = new StringBuilder();
+                sb.append("\nExpected number of columns does not match");
+            }
+            for (int c = 0, cs = Math.min(as.length, es.length); c < cs; c++)
+            {
+                ByteBuffer a = as[c];
+                ByteBuffer e = es[c];
+                if (!Objects.equals(a, e))
+                {
+                    Symbol symbol = columns.get(c);
+                    if (sb == null)
+                        sb = new StringBuilder();
+                    sb.append(String.format("\nIncorrect value for row %d 
column %s: expected %s but was %s", i, symbol,
+                                            e == null ? "null" : 
symbol.type().asCQL3Type().toCQLLiteral(e),
+                                            a == null ? "null" : 
symbol.type().asCQL3Type().toCQLLiteral(a)));
+                }
+            }
+        }
+
+        if (sb != null)
+        {
+            sb.append("\nExpected:\n").append(table(columns, expected));
+            sb.append("\nActual:\n").append(table(columns, actual));
+            throw new AssertionError(sb.toString());
+        }
+    }
+
+    private static String table(ImmutableUniqueList<Symbol> columns, Collection<Row> rows)
+    {
+        return TableBuilder.toStringPiped(columns.stream().map(Symbol::toCQL).collect(Collectors.toList()),
+                                          // intellij or junit can be tripped up by utf control or invisible chars, so this logic tries to normalize to make things more safe
+                                          () -> rows.stream()
+                                                    .map(r -> r.asCQL().stream().map(StringUtils::escapeControlChars).collect(Collectors.toList()))
+                                                    .iterator());
+    }
+
+    private static String table(ImmutableUniqueList<Symbol> columns, ByteBuffer[][] rows)
+    {
+        return TableBuilder.toStringPiped(columns.stream().map(Symbol::toCQL).collect(Collectors.toList()),
+                                          () -> Stream.of(rows).map(row -> asCQL(columns, row)).iterator());
+    }
+
+    private static Set<Row> toRow(ImmutableUniqueList<Symbol> columns, ByteBuffer[][] rows)
+    {
+        Set<Row> set = new HashSet<>();
+        for (ByteBuffer[] row : rows)
+            set.add(new Row(columns, row));
+        return set;
+    }
+
+    private static class SelectResult
+    {
+        private final ByteBuffer[][] rows;
+        private final boolean unordered;
+
+        private SelectResult(ByteBuffer[][] rows, boolean unordered)
+        {
+            this.rows = rows;
+            this.unordered = unordered;
+        }
+    }
+
+    private SelectResult getRowsAsByteBuffer(Select select)
+    {
+        if (select.where.isEmpty())
+            return all();
+        LookupContext ctx = context(select);
+        List<PrimaryKey> primaryKeys;
+        if (ctx.unmatchable)
+        {
+            primaryKeys = Collections.emptyList();
+        }
+        else if (ctx.eq.keySet().containsAll(factory.partitionColumns))
+        {
+            // tested

Review Comment:
   Are these comments supposed to stay? Are they just tracking what you already wrote tests for?



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: pr-unsubscr...@cassandra.apache.org

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


