dcapwell commented on code in PR #3689:
URL: https://github.com/apache/cassandra/pull/3689#discussion_r1850822923
##########
test/distributed/org/apache/cassandra/fuzz/topology/HarryTopologyMixupTest.java:
##########
@@ -101,28 +110,81 @@ protected void destroyState(State<Spec> state, @Nullable
Throwable cause)
if (cause != null) return;
if (((HarryState) state).numInserts > 0)
{
- // do one last read just to make sure we validate the data...
- var harry = state.schemaSpec.harry;
- harry.validateAll(harry.quiescentChecker());
+ for (Integer pkIdx : state.schema.pkGen.generated())
+ state.schema.harry.selectPartition(pkIdx);
}
}
private static BiFunction<RandomSource, Cluster, Spec>
createSchemaSpec(AccordMode mode)
{
return (rs, cluster) -> {
- long seed = rs.nextLong();
- var schema = HarryHelper.schemaSpecBuilder("harry",
"tbl").surjection().inflate(seed);
+ EntropySource rng = new JdkRandomEntropySource(rs.nextLong());
+ Generator<SchemaSpec> schemaGen;
+ SchemaSpec schema;
if (mode.kind != AccordMode.Kind.None)
- schema = schema.withTransactionMode(mode.passthroughMode);
- ReplayingHistoryBuilder harry = HarryHelper.dataGen(seed,
- mode.kind == AccordMode.Kind.Direct ? new
AccordSut(cluster) : new InJvmSut(cluster),
- new TokenPlacementModel.SimpleReplicationFactor(3),
- SystemUnderTest.ConsistencyLevel.QUORUM,
- schema);
- cluster.schemaChange(String.format("CREATE KEYSPACE %s WITH
replication = {'class': 'SimpleStrategy', 'replication_factor' : 3};",
HarryHelper.KEYSPACE));
- cluster.schemaChange(schema.compile().cql());
+ {
+ schemaGen = SchemaGenerators.schemaSpecGen("harry", "table",
1000,
+
SchemaSpec.optionsBuilder()
+
.withTransactionalMode(mode.transactionalMode)
+
.addWriteTimestamps(!isWriteTimeFromAccord(mode.transactionalMode)));
+ }
+ else
+ schemaGen = SchemaGenerators.schemaSpecGen("harry", "table",
1000);
+
+ schema = schemaGen.generate(rng);
+
+ HistoryBuilder harry = new
ReplayingHistoryBuilder(schema.valueGenerators,
+ hb -> {
+
InJvmDTestVisitExecutor.Builder builder = InJvmDTestVisitExecutor.builder();
+ if
(mode.kind == AccordMode.Kind.Direct)
+ builder
= builder.wrapQueries(QueryBuildingVisitExecutor.WrapQueries.TRANSACTION);
Review Comment:
This isn't working, but that looks like a bug with the visitors and not this
test class. The best way to know if we are good is to run in a debugger and
see which queries actually make it to jvm-dtest execute: they should not have
`using timestamp` and they should start with `begin transaction`. If either is
not true, we have a regression.
##########
test/harry/main/org/apache/cassandra/harry/SchemaSpec.java:
##########
@@ -0,0 +1,460 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.harry;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+import java.util.Objects;
+import java.util.Set;
+import java.util.function.Consumer;
+import java.util.stream.Collectors;
+
+import accord.utils.Invariants;
+import org.apache.cassandra.cql3.ast.Symbol;
+import org.apache.cassandra.harry.gen.Generator;
+import org.apache.cassandra.harry.gen.Generators;
+import org.apache.cassandra.harry.gen.ValueGenerators;
+import org.apache.cassandra.harry.util.IteratorsUtil;
+import org.apache.cassandra.service.consensus.TransactionalMode;
+import org.apache.cassandra.utils.ByteArrayUtil;
+
+import static org.apache.cassandra.harry.gen.InvertibleGenerator.MAX_ENTROPY;
+
+public class SchemaSpec
+{
+ public final String keyspace;
+ public final String table;
+
+ public final List<ColumnSpec<?>> partitionKeys;
+ public final List<ColumnSpec<?>> clusteringKeys;
+ public final List<ColumnSpec<?>> regularColumns;
+ public final List<ColumnSpec<?>> staticColumns;
+
+ public final List<ColumnSpec<?>> allColumnInSelectOrder;
+ public final ValueGenerators valueGenerators;
+ public final Options options;
+
+ public SchemaSpec(long seed,
+ int populationPerColumn,
+ String keyspace,
+ String table,
+ List<ColumnSpec<?>> partitionKeys,
+ List<ColumnSpec<?>> clusteringKeys,
+ List<ColumnSpec<?>> regularColumns,
+ List<ColumnSpec<?>> staticColumns)
+ {
+ this(seed, populationPerColumn, keyspace, table, partitionKeys,
clusteringKeys, regularColumns, staticColumns, optionsBuilder());
+ }
+
+ @SuppressWarnings({ "unchecked" })
+ public SchemaSpec(long seed,
+ int populationPerColumn,
+ String keyspace,
+ String table,
+ List<ColumnSpec<?>> partitionKeys,
+ List<ColumnSpec<?>> clusteringKeys,
+ List<ColumnSpec<?>> regularColumns,
+ List<ColumnSpec<?>> staticColumns,
+ Options options)
+ {
+ this.keyspace = keyspace;
+ this.table = table;
+ this.options = options;
+
+ this.partitionKeys = Collections.unmodifiableList(new
ArrayList<>(partitionKeys));
+ this.clusteringKeys = Collections.unmodifiableList(new
ArrayList<>(clusteringKeys));
+ this.staticColumns = Collections.unmodifiableList(new
ArrayList<>(staticColumns));
+ this.regularColumns = Collections.unmodifiableList(new
ArrayList<>(regularColumns));
+
+ List<ColumnSpec<?>> staticSelectOrder = new ArrayList<>(staticColumns);
+ staticSelectOrder.sort((s1, s2) ->
ByteArrayUtil.compareUnsigned(s1.name.getBytes(), s2.name.getBytes()));
+ List<ColumnSpec<?>> regularSelectOrder = new
ArrayList<>(regularColumns);
+ regularSelectOrder.sort((s1, s2) ->
ByteArrayUtil.compareUnsigned(s1.name.getBytes(), s2.name.getBytes()));
+
+ List<ColumnSpec<?>> selectOrder = new ArrayList<>();
+ for (ColumnSpec<?> column : IteratorsUtil.concat(partitionKeys,
+ clusteringKeys,
+ staticSelectOrder,
+ regularSelectOrder))
+ selectOrder.add(column);
+ this.allColumnInSelectOrder =
Collections.unmodifiableList(selectOrder);
+
+ // TODO: empty gen
+ this.valueGenerators = ValueGenerators.fromSchema(this, seed,
populationPerColumn);
+ }
+
+ public static /* unsigned */ long cumulativeEntropy(List<ColumnSpec<?>>
columns)
+ {
+ if (columns.isEmpty())
+ return 0;
+
+ long entropy = 1;
+ for (ColumnSpec<?> column : columns)
+ {
+ if (Long.compareUnsigned(column.type.typeEntropy(), MAX_ENTROPY)
== 0)
+ return MAX_ENTROPY;
+
+ long next = entropy * column.type.typeEntropy();
+ if (Long.compareUnsigned(next, entropy) < 0 ||
Long.compareUnsigned(next, column.type.typeEntropy()) < 0)
+ return MAX_ENTROPY;
+
+ entropy = next;
+ }
+
+ return entropy;
+ }
+
+ public static Generator<Object[]> forKeys(List<ColumnSpec<?>> columns)
+ {
+ Generator<?>[] gens = new Generator[columns.size()];
+ for (int i = 0; i < gens.length; i++)
+ gens[i] = columns.get(i).gen;
+ return Generators.zipArray(gens);
+ }
+
+ public String compile()
+ {
+ StringBuilder sb = new StringBuilder();
+
+ sb.append("CREATE TABLE ");
+ if (options.ifNotExists())
+ sb.append("IF NOT EXISTS ");
+
+ sb.append(Symbol.maybeQuote(keyspace))
+ .append(".")
+ .append(Symbol.maybeQuote(table))
+ .append(" (");
+
+ SeparatorAppender commaAppender = new SeparatorAppender();
+ for (ColumnSpec<?> cd : partitionKeys)
+ {
+ commaAppender.accept(sb);
+ sb.append(cd.toCQL());
+ if (partitionKeys.size() == 1 && clusteringKeys.isEmpty())
+ sb.append(" PRIMARY KEY");
+ }
+
+ for (ColumnSpec<?> cd : IteratorsUtil.concat(clusteringKeys,
+ staticColumns,
+ regularColumns))
+ {
+ commaAppender.accept(sb);
+ sb.append(cd.toCQL());
+ }
+
+ if (!clusteringKeys.isEmpty() || partitionKeys.size() > 1)
+ {
+ sb.append(", ").append(getPrimaryKeyCql());
+ }
+
+ // TODO: test
+ if (options.trackLts())
+ sb.append(", ").append("visited_lts list<bigint> static");
+
+ sb.append(')');
+
+ Runnable appendWith = doOnce(() -> sb.append(" WITH"));
+
+ if (options.compactStorage())
+ {
+ appendWith.run();
+ sb.append(" COMPACT STORAGE AND");
+ }
+
+ if (options.transactionalMode() != null)
+ {
+ appendWith.run();
+ sb.append(" transactional_mode =
'").append(options.transactionalMode()).append("' AND");
+ }
+
+ if (options.disableReadRepair())
+ {
+ appendWith.run();
+ sb.append(" read_repair = 'NONE' AND");
+ }
+
+ if (options.compactionStrategy() != null)
+ {
+ appendWith.run();
+ sb.append(" compaction = {'class':
'").append(options.compactionStrategy()).append("'} AND");
+ }
+
+ if (!clusteringKeys.isEmpty())
+ {
+ appendWith.run();
+ sb.append(getClusteringOrderCql())
+ .append(';');
+ }
+
+ return sb.toString();
+ }
+
+ private String getClusteringOrderCql()
+ {
+ StringBuilder sb = new StringBuilder();
+ if (!clusteringKeys.isEmpty())
+ {
+ sb.append(" CLUSTERING ORDER BY (");
+
+ SeparatorAppender commaAppender = new SeparatorAppender();
+ for (ColumnSpec<?> column : clusteringKeys)
+ {
+ commaAppender.accept(sb);
+ sb.append(column.name).append(' ').append(column.isReversed()
? "DESC" : "ASC");
+ }
+
+ sb.append(')');
+ }
+
+ return sb.toString();
+ }
+
+ private String getPrimaryKeyCql()
+ {
+ StringBuilder sb = new StringBuilder();
+ sb.append("PRIMARY KEY (");
+ if (partitionKeys.size() > 1)
+ {
+ sb.append('(');
+ SeparatorAppender commaAppender = new SeparatorAppender();
+ for (ColumnSpec<?> cd : partitionKeys)
+ {
+ commaAppender.accept(sb);
+ sb.append(cd.name);
+ }
+ sb.append(')');
+ }
+ else
+ {
+ sb.append(partitionKeys.get(0).name);
+ }
+
+ for (ColumnSpec<?> cd : clusteringKeys)
+ sb.append(", ").append(cd.name);
+
+ return sb.append(')').toString();
+ }
+
+ public String toString()
+ {
+ return String.format("schema {cql=%s}", compile());
+ }
+
+ private static Runnable doOnce(Runnable r)
+ {
+ return new Runnable()
+ {
+ boolean executed = false;
+
+ public void run()
+ {
+ if (executed)
+ return;
+
+ executed = true;
+ r.run();
+ }
+ };
+ }
+
+ public static class SeparatorAppender implements Consumer<StringBuilder>
+ {
+ boolean isFirst = true;
+ private final String separator;
+
+ public SeparatorAppender()
+ {
+ this(",");
+ }
+
+ public SeparatorAppender(String separator)
+ {
+ this.separator = separator;
+ }
+
+ public void accept(StringBuilder stringBuilder)
+ {
+ if (isFirst)
+ isFirst = false;
+ else
+ stringBuilder.append(separator);
+ }
+
+ public void accept(StringBuilder stringBuilder, String s)
+ {
+ accept(stringBuilder);
+ stringBuilder.append(s);
+ }
+
+
+ public void reset()
+ {
+ isFirst = true;
+ }
+ }
+
+ public boolean equals(Object o)
+ {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ SchemaSpec that = (SchemaSpec) o;
+ return Objects.equals(keyspace, that.keyspace) &&
+ Objects.equals(table, that.table) &&
+ Objects.equals(partitionKeys, that.partitionKeys) &&
+ Objects.equals(clusteringKeys, that.clusteringKeys) &&
+ Objects.equals(regularColumns, that.regularColumns);
+ }
+
+ public int hashCode()
+ {
+ return Objects.hash(keyspace, table, partitionKeys, clusteringKeys,
regularColumns);
+ }
+
+ public static interface Options
+ {
+ String transactionalMode();
Review Comment:
Un-resolving this comment, as this wasn't changed.
##########
test/unit/org/apache/cassandra/cql3/ast/Select.java:
##########
@@ -60,25 +60,26 @@ public class Select implements Statement
public final Optional<OrderBy> orderBy;
public final Optional<Value> limit;
public final boolean allowFiltering;
-
+ public final boolean insertNewLine;
Review Comment:
Please remove this from the state; we can add `toCQL` versions that allow
overriding this.
Patch to fix here:
https://github.com/dcapwell/cassandra/commit/1c8c34914d99633277940ebab1d12c16caba0898
Given these are not Harry APIs, but rather APIs I am maintaining, this is a
blocking comment.
##########
test/harry/main/org/apache/cassandra/harry/gen/ValueGenerators.java:
##########
@@ -0,0 +1,154 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.harry.gen;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Comparator;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.stream.Collectors;
+
+import org.apache.cassandra.harry.ColumnSpec;
+import org.apache.cassandra.harry.SchemaSpec;
+import org.apache.cassandra.harry.gen.rng.JdkRandomEntropySource;
+import org.apache.cassandra.harry.util.IteratorsUtil;
+
+import static org.apache.cassandra.harry.gen.InvertibleGenerator.fromType;
+import static org.apache.cassandra.harry.SchemaSpec.cumulativeEntropy;
+import static org.apache.cassandra.harry.SchemaSpec.forKeys;
+
+public class ValueGenerators
+{
+ public final Bijections.IndexedBijection<Object[]> pkGen;
+ public final Bijections.IndexedBijection<Object[]> ckGen;
+
+ public final List<Bijections.IndexedBijection<Object>> regularColumnGens;
+ public final List<Bijections.IndexedBijection<Object>> staticColumnGens;
+
+ public final List<Comparator<Object>> pkComparators;
+ public final List<Comparator<Object>> ckComparators;
+ public final List<Comparator<Object>> regularComparators;
+ public final List<Comparator<Object>> staticComparators;
+
+ public ValueGenerators(Bijections.IndexedBijection<Object[]> pkGen,
+ Bijections.IndexedBijection<Object[]> ckGen,
+ List<Bijections.IndexedBijection<Object>>
regularColumnGens,
+ List<Bijections.IndexedBijection<Object>>
staticColumnGens,
+
+ List<Comparator<Object>> pkComparators,
+ List<Comparator<Object>> ckComparators,
+ List<Comparator<Object>> regularComparators,
+ List<Comparator<Object>> staticComparators)
+ {
+ this.pkGen = pkGen;
+ this.ckGen = ckGen;
+ this.regularColumnGens = regularColumnGens;
+ this.staticColumnGens = staticColumnGens;
+ this.pkComparators = pkComparators;
+ this.ckComparators = ckComparators;
+ this.regularComparators = regularComparators;
+ this.staticComparators = staticComparators;
+ }
+
+ @SuppressWarnings({ "unchecked" })
+ public static ValueGenerators fromSchema(SchemaSpec schema, long seed, int
populationPerColumn)
+ {
+ List<Comparator<Object>> pkComparators = new ArrayList<>();
+ List<Comparator<Object>> ckComparators = new ArrayList<>();
+ List<Comparator<Object>> regularComparators = new ArrayList<>();
+ List<Comparator<Object>> staticComparators = new ArrayList<>();
+
+ EntropySource rng = new JdkRandomEntropySource(seed);
+ for (int i = 0; i < schema.partitionKeys.size(); i++)
+ pkComparators.add((Comparator<Object>)
schema.partitionKeys.get(i).type.comparator());
+ for (int i = 0; i < schema.clusteringKeys.size(); i++)
+ ckComparators.add((Comparator<Object>)
schema.clusteringKeys.get(i).type.comparator());
+ for (int i = 0; i < schema.regularColumns.size(); i++)
+ regularComparators.add((Comparator<Object>)
schema.regularColumns.get(i).type.comparator());
+ for (int i = 0; i < schema.staticColumns.size(); i++)
+ staticComparators.add((Comparator<Object>)
schema.staticColumns.get(i).type.comparator());
+
+ Map<Generator<Object>, InvertibleGenerator<Object>> map = new
HashMap<>();
Review Comment:
If the map switched from `column.gen` to `column`, then the logic is fine
(though we should never have the case where we replace a generator, as
columns *must* be unique).
##########
test/harry/main/org/apache/cassandra/harry/execution/QueryBuildingVisitExecutor.java:
##########
@@ -0,0 +1,181 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.harry.execution;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import accord.utils.Invariants;
+import org.apache.cassandra.harry.SchemaSpec;
+import org.apache.cassandra.harry.op.Visit;
+import org.apache.cassandra.harry.cql.DeleteHelper;
+import org.apache.cassandra.harry.cql.SelectHelper;
+import org.apache.cassandra.harry.cql.WriteHelper;
+import org.apache.cassandra.harry.op.Operations;
+import org.apache.cassandra.stress.StressAction;
+
+public class QueryBuildingVisitExecutor extends VisitExecutor
+{
+ private static final Logger logger =
LoggerFactory.getLogger(QueryBuildingVisitExecutor.class);
+ protected final SchemaSpec schema;
+ protected final WrapQueries wrapQueries;
+
+ public QueryBuildingVisitExecutor(SchemaSpec schema, WrapQueries
wrapQueries)
+ {
+ this.schema = schema;
+ this.wrapQueries = wrapQueries;
+ }
+
+ public CompiledStatement compile(Visit visit)
+ {
+ beginLts(visit.lts);
+ for (Operations.Operation op : visit.operations)
+ {
+ if (logger.isTraceEnabled())
+ logger.trace("{} {}", visit.lts, op);
+ operation(op);
+ }
+
+ // TODO: try inducing timeouts and checking non-propagation or
discovery
+ endLts(visit.lts);
+ return compiledStatement;
+ }
+
+ /**
+ * Per-LTS state
+ */
+ private final List<String> statements = new ArrayList<>();
+ private final List<Object> bindings = new ArrayList<>();
+ private final Set<Long> visitedPds = new HashSet<>();
+
+ protected List<Operations.SelectStatement> selects = new ArrayList<>();
+ private CompiledStatement compiledStatement = null;
+
+ protected void beginLts(long lts)
+ {
+ statements.clear();
+ bindings.clear();
+ visitedPds.clear();
+ selects.clear();
+ compiledStatement = null;
+ }
+
+ protected void endLts(long lts)
+ {
+ if (statements.isEmpty())
+ {
+ Invariants.checkState(bindings.isEmpty() && visitedPds.isEmpty()
&& selects.isEmpty());
+ return;
+ }
+
+ String query = String.join("\n ", statements);
+
+ Object[] bindingsArray = new Object[bindings.size()];
+ bindings.toArray(bindingsArray);
+ statements.clear();
+ bindings.clear();
+
+ compiledStatement = new CompiledStatement(query, bindingsArray);
+ assert visitedPds.size() == 1 : String.format("Token aware only works
with a single value per token, but got %s", visitedPds);
+ }
+
+ protected void operation(Operations.Operation operation)
+ {
+ if (operation instanceof Operations.PartitionOperation)
+ visitedPds.add(((Operations.PartitionOperation) operation).pd());
+ CompiledStatement statement;
+ switch (operation.kind())
+ {
+ case UPDATE:
+ statement = WriteHelper.inflateUpdate((Operations.WriteOp)
operation, schema, operation.lts());
+ break;
+ case INSERT:
+ statement = WriteHelper.inflateInsert((Operations.WriteOp)
operation, schema, operation.lts());
+ break;
+ case DELETE_RANGE:
+ statement =
DeleteHelper.inflateDelete((Operations.DeleteRange) operation, schema,
operation.lts());
+ break;
+ case DELETE_PARTITION:
+ statement =
DeleteHelper.inflateDelete((Operations.DeletePartition) operation, schema,
operation.lts());
+ break;
+ case DELETE_ROW:
+ statement = DeleteHelper.inflateDelete((Operations.DeleteRow)
operation, schema, operation.lts());
+ break;
+ case DELETE_COLUMNS:
+ statement =
DeleteHelper.inflateDelete((Operations.DeleteColumns) operation, schema,
operation.lts());
+ break;
+ case SELECT_PARTITION:
+ statement = SelectHelper.select((Operations.SelectPartition)
operation, schema);
+ selects.add((Operations.SelectStatement) operation);
+ break;
+ case SELECT_ROW:
+ statement = SelectHelper.select((Operations.SelectRow)
operation, schema);
+ selects.add((Operations.SelectStatement) operation);
+ break;
+ case SELECT_RANGE:
+ statement = SelectHelper.select((Operations.SelectRange)
operation, schema);
+ selects.add((Operations.SelectStatement) operation);
+ break;
+ case SELECT_CUSTOM:
+ statement = SelectHelper.select((Operations.SelectCustom)
operation, schema);
+ selects.add((Operations.SelectStatement) operation);
+ break;
+
+ case CUSTOM:
+ ((Operations.CustomRunnableOperation) operation).execute();
+ return;
+ default:
+ throw new IllegalArgumentException();
+ }
+ statements.add(statement.cql());
+ Collections.addAll(bindings, statement.bindings());
+ }
+
+ private static final String wrapInUnloggedBatchFormat = "BEGIN UNLOGGED
BATCH\n" +
+ " %s\n" +
+ "APPLY BATCH;";
+
+ private static final String wrapInTxnFormat = "BEGIN TRANSACTION\n" +
+ " %s\n" +
+ "COMMIT TRANSACTION;";
+
+ public interface WrapQueries
+ {
+ WrapQueries UNLOGGED_BATCH = (visit, compiled) -> {
+ if (visit.operations.length == 1)
+ return compiled;
Review Comment:
Since it's an interface, I can have my tests do whatever I want... so that's fine
for the default behavior.
##########
test/harry/main/org/apache/cassandra/harry/execution/CQLVisitExecutor.java:
##########
@@ -0,0 +1,143 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.harry.execution;
+
+import java.util.Arrays;
+import java.util.List;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import accord.utils.Invariants;
+import org.apache.cassandra.harry.SchemaSpec;
+import org.apache.cassandra.harry.op.Visit;
+import org.apache.cassandra.harry.op.Operations;
+import org.apache.cassandra.harry.model.Model;
+
+/**
+ *
+ * TODO: Transactional results ; LET
+ */
+public abstract class CQLVisitExecutor
+{
+ private static final Logger logger =
LoggerFactory.getLogger(QueryBuildingVisitExecutor.class);
+ protected final SchemaSpec schema;
+
+ protected final DataTracker dataTracker;
+ protected final Model model;
+ private final QueryBuildingVisitExecutor queryBuilder;
+
+ public CQLVisitExecutor(SchemaSpec schema, DataTracker dataTracker, Model
model, QueryBuildingVisitExecutor queryBuilder)
+ {
+ this.schema = schema;
+ this.dataTracker = dataTracker;
+ this.model = model;
+ this.queryBuilder = queryBuilder;
+ }
+
+ public static void replay(CQLVisitExecutor executor, Model.Replay replay)
+ {
+ for (Visit visit : replay)
+ executeVisit(visit, executor, replay);
+ }
+
+ public static void executeVisit(Visit visit, CQLVisitExecutor executor,
Model.Replay replay)
+ {
+ try
+ {
+ executor.execute(visit);
+ }
+ catch (Throwable t)
+ {
+ // Existing issues
+ if (t.getMessage() != null && t.getMessage().contains("class
org.apache.cassandra.db.ReadQuery$1 cannot be cast to class
org.apache.cassandra.db.SinglePartitionReadQuery$Group"))
+ return;
Review Comment:
can you at least add a TODO so grep can find this?
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]