This is an automated email from the ASF dual-hosted git repository.

ifesdjeen pushed a commit to branch CASSANDRA-16262-2
in repository https://gitbox.apache.org/repos/asf/cassandra-harry.git
commit f22d52e8e995411fd8190c8d282582975a8e02b1
Author: Alex Petrov <[email protected]>
AuthorDate: Mon Jul 12 17:04:37 2021 +0200

    Core improvements

    Major features:
     * Implement updates
     * Make sure we can advance RNGs from zero as well
     * Fix a problem with predictable descriptor

    Bugfixes:
     * Fix column mask inconsistencies
     * Fix a problem with partition key liveness info

    Quality of life improvements:
     * Get rid of driver dependency for query generation
     * Get rid of guava dependency
     * Add reusable config files
     * Switch from streams to iterables

    General improvements:
     * Make unset and nil descriptors more distinct and harder to generate particularly for the smaller descriptors
     * Fixed schema configurator to allow empty column sets
     * Move workloads to a common dir
     * Fixed schema configurator to output correct json
     * No-op checker to execute with Quorum, not ALL
     * Make tag for build unique

    Patch by Alex Petrov for CASSANDRA-16262
---
 harry-core/pom.xml | 9 +-
 harry-core/src/harry/core/Configuration.java | 44 +++++-
 harry-core/src/harry/core/VisibleForTesting.java | 5 +
 harry-core/src/harry/corruptor/RowCorruptor.java | 2 +-
 harry-core/src/harry/ddl/ColumnSpec.java | 10 +-
 harry-core/src/harry/ddl/SchemaGenerators.java | 76 +++++----
 harry-core/src/harry/ddl/SchemaSpec.java | 102 ++++++++++---
 .../src/harry/generators/DataGenerators.java | 17 ++-
 .../src/harry/generators/RandomGenerator.java | 3 +-
 harry-core/src/harry/generators/RngUtils.java | 6 +-
 harry-core/src/harry/generators/Surjections.java | 2 +-
 .../harry/model/AlwaysSamePartitionSelector.java | 69 +++++++++
 harry-core/src/harry/model/NoOpChecker.java | 3 +-
 harry-core/src/harry/model/OpSelectors.java | 54 +++++--
 harry-core/src/harry/model/SelectHelper.java | 114 ++++++++++----
 .../model/clock/ApproximateMonotonicClock.java | 3 +-
 harry-core/src/harry/model/clock/OffsetClock.java | 20 +++
 harry-core/src/harry/model/sut/PrintlnSut.java | 18 +++
 harry-core/src/harry/operations/DeleteHelper.java | 61 ++++----
 harry-core/src/harry/operations/Relation.java | 80 ++--------
 harry-core/src/harry/operations/WriteHelper.java | 170 +++++++++++----------
 harry-core/src/harry/reconciler/Reconciler.java | 61 +++++---
 .../harry/runner/CorruptingPartitionVisitor.java | 1 -
 harry-core/src/harry/runner/DataTracker.java | 3 +
 .../src/harry/runner/DefaultDataTracker.java | 2 +-
 .../src/harry/runner/MutatingPartitionVisitor.java | 1 +
 .../src/harry/runner/MutatingRowVisitor.java | 44 +++++-
 harry-core/src/harry/runner/Operation.java | 20 ++-
 harry-core/src/harry/runner/QueryGenerator.java | 2 +-
 harry-core/src/harry/util/BitSet.java | 2 +-
 harry-core/src/harry/util/TestRunner.java | 9 +-
 harry-core/test/harry/model/OpSelectorsTest.java | 34 +++--
 32 files changed, 691 insertions(+), 356 deletions(-)

diff --git a/harry-core/pom.xml b/harry-core/pom.xml
index fef070d..d313e7a 100755
--- a/harry-core/pom.xml
+++ b/harry-core/pom.xml
@@ -33,10 +33,11 @@
     <name>Harry Core</name>
     <dependencies>
-        <dependency>
-            <groupId>com.datastax.cassandra</groupId>
-            <artifactId>cassandra-driver-core</artifactId>
-        </dependency>
+        <dependency>
+            <groupId>org.slf4j</groupId>
+            <artifactId>slf4j-api</artifactId>
+            <version>1.7.25</version>
+        </dependency>
         <dependency>
             <groupId>org.apache.commons</groupId>
diff --git a/harry-core/src/harry/core/Configuration.java b/harry-core/src/harry/core/Configuration.java
index bd011c2..4a2a1e6 100644
--- a/harry-core/src/harry/core/Configuration.java
+++ b/harry-core/src/harry/core/Configuration.java
@@ -38,10 +38,13 @@
 import
harry.ddl.SchemaGenerators; import harry.ddl.SchemaSpec; import harry.generators.Surjections; import harry.generators.distribution.Distribution; +import harry.model.AlwaysSamePartitionSelector; import harry.model.Model; import harry.model.OpSelectors; import harry.model.QuiescentChecker; import harry.model.clock.ApproximateMonotonicClock; +import harry.model.clock.OffsetClock; +import harry.model.sut.PrintlnSut; import harry.model.sut.SystemUnderTest; import harry.runner.AllPartitionsValidator; import harry.runner.CorruptingPartitionVisitor; @@ -92,6 +95,11 @@ public class Configuration mapper.registerSubtypes(CorruptingPartitionVisitorConfiguration.class); mapper.registerSubtypes(RecentPartitionsValidatorConfiguration.class); mapper.registerSubtypes(FixedSchemaProviderConfiguration.class); + mapper.registerSubtypes(AlwaysSamePartitionSelector.AlwaysSamePartitionSelectorConfiguration.class); + mapper.registerSubtypes(OffsetClock.OffsetClockConfiguration.class); + mapper.registerSubtypes(PrintlnSut.PrintlnSutConfiguration.class); + mapper.registerSubtypes(NoOpDataTrackerConfiguration.class); + mapper.registerSubtypes(NoOpMetricReporterConfiguration.class); } public final long seed; @@ -407,7 +415,7 @@ public class Configuration } - @JsonTypeName("no_op_tracker") + @JsonTypeName("no_op") public static class NoOpDataTrackerConfiguration implements DataTrackerConfiguration { @JsonCreator @@ -613,7 +621,7 @@ public class Configuration } } - @JsonTypeName("no_op_checker") + @JsonTypeName("no_op") public static class NoOpCheckerConfig implements ModelConfiguration { @JsonCreator @@ -692,7 +700,7 @@ public class Configuration private Map<OpSelectors.OperationKind, Integer> operation_kind_weights = new OperationKindSelectorBuilder() .addWeight(OpSelectors.OperationKind.DELETE_ROW, 1) .addWeight(OpSelectors.OperationKind.DELETE_COLUMN, 1) - .addWeight(OpSelectors.OperationKind.WRITE, 98) + .addWeight(OpSelectors.OperationKind.INSERT, 98) .build(); private Map<OpSelectors.OperationKind, long[]> column_mask_bitsets; private int[] fractions; @@ -1057,6 +1065,12 @@ public class Configuration @JsonTypeName("fixed") public static class FixedSchemaProviderConfiguration implements SchemaProviderConfiguration { + public final String keyspace; + public final String table; + public final Map<String, String> partition_keys; + public final Map<String, String> clustering_keys; + public final Map<String, String> regular_columns; + public final Map<String, String> static_keys; private final SchemaSpec schemaSpec; @JsonCreator @@ -1067,10 +1081,28 @@ public class Configuration @JsonProperty("regular_columns") Map<String, String> regulars, @JsonProperty("static_columns") Map<String, String> statics) { - this.schemaSpec = SchemaGenerators.parse(keyspace, table, - pks, cks, regulars, statics); + this(SchemaGenerators.parse(keyspace, table, + pks, cks, regulars, statics), + pks, + cks, + regulars, + statics); } + public FixedSchemaProviderConfiguration(SchemaSpec schemaSpec, + Map<String, String> pks, + Map<String, String> cks, + Map<String, String> regulars, + Map<String, String> statics) + { + this.schemaSpec = schemaSpec; + this.keyspace = schemaSpec.keyspace; + this.table = schemaSpec.table; + this.partition_keys = pks; + this.clustering_keys = cks; + this.regular_columns = regulars; + this.static_keys = statics; + } public SchemaSpec make(long seed) { return schemaSpec; @@ -1082,7 +1114,7 @@ public class Configuration { } - @JsonTypeName("default") + @JsonTypeName("no_op") public static class 
NoOpMetricReporterConfiguration implements MetricReporterConfiguration { public MetricReporter make() diff --git a/harry-core/src/harry/core/VisibleForTesting.java b/harry-core/src/harry/core/VisibleForTesting.java new file mode 100644 index 0000000..efa712e --- /dev/null +++ b/harry-core/src/harry/core/VisibleForTesting.java @@ -0,0 +1,5 @@ +package harry.core; + +public @interface VisibleForTesting { +} + diff --git a/harry-core/src/harry/corruptor/RowCorruptor.java b/harry-core/src/harry/corruptor/RowCorruptor.java index 4c6b005..7b19cf1 100644 --- a/harry-core/src/harry/corruptor/RowCorruptor.java +++ b/harry-core/src/harry/corruptor/RowCorruptor.java @@ -29,7 +29,7 @@ import harry.operations.CompiledStatement; public interface RowCorruptor { - Logger logger = LoggerFactory.getLogger(QueryResponseCorruptor.class); + final Logger logger = LoggerFactory.getLogger(QueryResponseCorruptor.class); boolean canCorrupt(ResultSetRow row); diff --git a/harry-core/src/harry/ddl/ColumnSpec.java b/harry-core/src/harry/ddl/ColumnSpec.java index 94e9881..6d652c4 100644 --- a/harry-core/src/harry/ddl/ColumnSpec.java +++ b/harry-core/src/harry/ddl/ColumnSpec.java @@ -18,20 +18,18 @@ package harry.ddl; +import java.util.Arrays; import java.util.Collection; +import java.util.Collections; import java.util.Date; import java.util.HashMap; import java.util.Map; import java.util.Objects; import java.util.UUID; -import com.google.common.collect.ImmutableList; - import harry.generators.Bijections; import harry.generators.StringBijection; -import static harry.generators.StringBijection.getByte; - public class ColumnSpec<T> { public final String name; @@ -318,7 +316,7 @@ public class ColumnSpec<T> } }; - public static final Collection<DataType<?>> DATA_TYPES = ImmutableList.of( + public static final Collection<DataType<?>> DATA_TYPES = Collections.unmodifiableList(Arrays.asList( ColumnSpec.int8Type, ColumnSpec.int16Type, ColumnSpec.int32Type, @@ -328,7 +326,7 @@ public class ColumnSpec<T> ColumnSpec.doubleType, ColumnSpec.asciiType, ColumnSpec.uuidType, - ColumnSpec.timestampType); + ColumnSpec.timestampType)); public static class ReversedType<T> extends DataType<T> { diff --git a/harry-core/src/harry/ddl/SchemaGenerators.java b/harry-core/src/harry/ddl/SchemaGenerators.java index 47936e1..8508d44 100644 --- a/harry-core/src/harry/ddl/SchemaGenerators.java +++ b/harry-core/src/harry/ddl/SchemaGenerators.java @@ -21,16 +21,14 @@ package harry.ddl; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.concurrent.atomic.AtomicInteger; import java.util.function.Function; import java.util.function.Supplier; -import com.google.common.collect.ImmutableList; -import com.google.common.collect.ImmutableMap; - -import com.fasterxml.jackson.annotation.JsonProperty; import harry.generators.Generator; import harry.generators.Surjections; @@ -43,32 +41,30 @@ public class SchemaGenerators return new Builder(ks); } - public static final Collection<ColumnSpec.DataType<?>> clusteringKeyTypes; public static final Map<String, ColumnSpec.DataType<?>> nameToTypeMap; public static final Collection<ColumnSpec.DataType<?>> columnTypes; + public static final Collection<ColumnSpec.DataType<?>> partitionKeyTypes; + public static final Collection<ColumnSpec.DataType<?>> clusteringKeyTypes; static { + partitionKeyTypes = Collections.unmodifiableList(Arrays.asList(ColumnSpec.int64Type, + 
ColumnSpec.asciiType, + ColumnSpec.asciiType(4, 5), + ColumnSpec.asciiType(4, 10))); + + columnTypes = Collections.unmodifiableList(Arrays.asList( +// ColumnSpec.int8Type, +// ColumnSpec.int16Type, +// ColumnSpec.int32Type, + ColumnSpec.int64Type, + ColumnSpec.asciiType, + ColumnSpec.asciiType(4, 256), + ColumnSpec.asciiType(4, 512))); + - ImmutableList.Builder<ColumnSpec.DataType<?>> builder = ImmutableList.builder(); - builder.add(ColumnSpec.int8Type, - ColumnSpec.int16Type, - ColumnSpec.int32Type, - ColumnSpec.int64Type, - ColumnSpec.asciiType, - ColumnSpec.asciiType(4, 256), - ColumnSpec.asciiType(4, 512)); - - columnTypes = builder.build(); - builder.add(ColumnSpec.int8Type, - ColumnSpec.int16Type, - ColumnSpec.int32Type, - ColumnSpec.int64Type, - ColumnSpec.asciiType); - builder = ImmutableList.builder(); - builder.addAll(columnTypes); - - ImmutableMap.Builder<String, ColumnSpec.DataType<?>> mapBuilder = ImmutableMap.builder(); + List<ColumnSpec.DataType<?>> builder = new ArrayList<>(columnTypes); + Map<String, ColumnSpec.DataType<?>> mapBuilder = new HashMap<>(); for (ColumnSpec.DataType<?> columnType : columnTypes) { @@ -82,8 +78,8 @@ public class SchemaGenerators builder.add(ColumnSpec.floatType); builder.add(ColumnSpec.doubleType); - clusteringKeyTypes = builder.build(); - nameToTypeMap = mapBuilder.build(); + clusteringKeyTypes = Collections.unmodifiableList(builder); + nameToTypeMap = Collections.unmodifiableMap(mapBuilder); } @SuppressWarnings("unchecked") @@ -126,7 +122,7 @@ public class SchemaGenerators public ColumnSpec<?> apply(ColumnSpec.DataType<?> type) { - return new ColumnSpec<>(prefix + (counter++), + return new ColumnSpec<>(String.format("%s%04d", prefix, counter++), type, kind); } @@ -143,7 +139,24 @@ public class SchemaGenerators public ColumnSpec<?> apply(ColumnSpec.DataType<?> type) { - return ColumnSpec.ck(prefix + (counter++), type); + return ColumnSpec.ck(String.format("%s%04d", prefix, counter++), type); + } + }); + } + + @SuppressWarnings("unchecked") + public static Generator<ColumnSpec<?>> partitionColumnSpecGenerator(String prefix) + { + return fromValues(partitionKeyTypes) + .map(new Function<ColumnSpec.DataType<?>, ColumnSpec<?>>() + { + private int counter = 0; + + public ColumnSpec<?> apply(ColumnSpec.DataType<?> type) + { + + return ColumnSpec.pk(String.format("%s%04d", prefix, counter++), + type); } }); } @@ -155,10 +168,10 @@ public class SchemaGenerators private final String keyspace; private final Supplier<String> tableNameSupplier; - private Generator<ColumnSpec<?>> pkGenerator = columnSpecGenerator("pk", ColumnSpec.Kind.PARTITION_KEY); + private Generator<ColumnSpec<?>> pkGenerator = partitionColumnSpecGenerator("pk"); private Generator<ColumnSpec<?>> ckGenerator = clusteringColumnSpecGenerator("ck"); private Generator<ColumnSpec<?>> regularGenerator = columnSpecGenerator("regular", ColumnSpec.Kind.REGULAR); - private Generator<ColumnSpec<?>> staticGenerator = columnSpecGenerator("regular", ColumnSpec.Kind.STATIC); + private Generator<ColumnSpec<?>> staticGenerator = columnSpecGenerator("static", ColumnSpec.Kind.STATIC); private int minPks = 1; private int maxPks = 1; @@ -457,6 +470,9 @@ public class SchemaGenerators public static List<ColumnSpec<?>> toColumns(Map<String, String> config, ColumnSpec.Kind kind, boolean allowReverse) { + if (config == null) + return Collections.EMPTY_LIST; + List<ColumnSpec<?>> columns = new ArrayList<>(config.size()); for (Map.Entry<String, String> e : config.entrySet()) diff --git 
a/harry-core/src/harry/ddl/SchemaSpec.java b/harry-core/src/harry/ddl/SchemaSpec.java index f07c106..eba87c6 100644 --- a/harry-core/src/harry/ddl/SchemaSpec.java +++ b/harry-core/src/harry/ddl/SchemaSpec.java @@ -18,14 +18,12 @@ package harry.ddl; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Iterator; import java.util.List; import java.util.Objects; import java.util.function.Consumer; -import java.util.stream.Stream; - -import com.google.common.collect.ImmutableList; -import com.google.common.collect.Iterables; -import com.google.common.collect.Streams; import harry.generators.DataGenerators; import harry.operations.CompiledStatement; @@ -87,22 +85,30 @@ public class SchemaSpec this.keyspace = keyspace; this.table = table; this.isCompactStorage = isCompactStorage; - this.partitionKeys = ImmutableList.copyOf(partitionKeys); + + this.partitionKeys = Collections.unmodifiableList(new ArrayList<>(partitionKeys)); for (int i = 0; i < partitionKeys.size(); i++) partitionKeys.get(i).setColumnIndex(i); - this.clusteringKeys = ImmutableList.copyOf(clusteringKeys); + this.clusteringKeys = Collections.unmodifiableList(new ArrayList<>(clusteringKeys)); for (int i = 0; i < clusteringKeys.size(); i++) clusteringKeys.get(i).setColumnIndex(i); - this.staticColumns = ImmutableList.copyOf(staticColumns); + this.staticColumns = Collections.unmodifiableList(new ArrayList<>(staticColumns)); for (int i = 0; i < staticColumns.size(); i++) staticColumns.get(i).setColumnIndex(i); - this.regularColumns = ImmutableList.copyOf(regularColumns); + this.regularColumns = Collections.unmodifiableList(new ArrayList<>(regularColumns)); for (int i = 0; i < regularColumns.size(); i++) regularColumns.get(i).setColumnIndex(i); - this.allColumns = ImmutableList.copyOf(Iterables.concat(partitionKeys, - clusteringKeys, - staticColumns, - regularColumns)); + + List<ColumnSpec<?>> all = new ArrayList<>(); + for (ColumnSpec<?> columnSpec : concat(partitionKeys, + clusteringKeys, + staticColumns, + regularColumns)) + { + all.add(columnSpec); + } + this.allColumns = Collections.unmodifiableList(all); + this.pkGenerator = DataGenerators.createKeyGenerator(partitionKeys); this.ckGenerator = DataGenerators.createKeyGenerator(clusteringKeys); @@ -122,7 +128,6 @@ public class SchemaSpec } // todo: bitset views? - public BitSet regularColumnsMask() { return this.regularColumnsMask; @@ -254,13 +259,13 @@ public class SchemaSpec sb.append(" PRIMARY KEY"); } - Streams.concat(clusteringKeys.stream(), - staticColumns.stream(), - regularColumns.stream()) - .forEach((cd) -> { - commaAppender.accept(sb); - sb.append(cd.toCQL()); - }); + for (ColumnSpec<?> cd : concat(clusteringKeys, + staticColumns, + regularColumns)) + { + commaAppender.accept(sb); + sb.append(cd.toCQL()); + } if (clusteringKeys.size() > 0 || partitionKeys.size() > 1) { @@ -409,4 +414,59 @@ public class SchemaSpec { return Objects.hash(keyspace, table, partitionKeys, clusteringKeys, regularColumns); } + + public static <T> Iterable<T> concat(Iterable<T>... 
iterables) + { + assert iterables != null && iterables.length > 0; + if (iterables.length == 1) + return iterables[0]; + + return () -> { + return new Iterator<T>() + { + int idx; + Iterator<T> current; + boolean hasNext; + + { + idx = 0; + prepareNext(); + } + + private void prepareNext() + { + if (current != null && current.hasNext()) + { + hasNext = true; + return; + } + + while (idx < iterables.length) + { + current = iterables[idx].iterator(); + idx++; + if (current.hasNext()) + { + hasNext = true; + return; + } + } + + hasNext = false; + } + + public boolean hasNext() + { + return hasNext; + } + + public T next() + { + T next = current.next(); + prepareNext(); + return next; + } + }; + }; + } } diff --git a/harry-core/src/harry/generators/DataGenerators.java b/harry-core/src/harry/generators/DataGenerators.java index 6878ae4..e87e7dc 100644 --- a/harry-core/src/harry/generators/DataGenerators.java +++ b/harry-core/src/harry/generators/DataGenerators.java @@ -22,15 +22,22 @@ import java.util.Arrays; import java.util.Collections; import java.util.List; -import com.google.common.annotations.VisibleForTesting; - +import harry.core.VisibleForTesting; import harry.ddl.ColumnSpec; public class DataGenerators { - public static final Object UNSET_VALUE = new Object(); - public static long UNSET_DESCR = 0; - public static long NIL_DESCR = -1; + public static final Object UNSET_VALUE = new Object() { + public String toString() + { + return "UNSET"; + } + }; + + // There is still a slim chance that we're going to produce either of these values by chance, but we'll catch this + // during value generation + public static long UNSET_DESCR = Long.MAX_VALUE; + public static long NIL_DESCR = Long.MIN_VALUE; public static Object[] inflateData(List<ColumnSpec<?>> columns, long[] descriptors) { diff --git a/harry-core/src/harry/generators/RandomGenerator.java b/harry-core/src/harry/generators/RandomGenerator.java index 869f60e..a1ca125 100644 --- a/harry-core/src/harry/generators/RandomGenerator.java +++ b/harry-core/src/harry/generators/RandomGenerator.java @@ -18,8 +18,7 @@ package harry.generators; -import com.google.common.annotations.VisibleForTesting; - +import harry.core.VisibleForTesting; /** * Random generator interface that offers: diff --git a/harry-core/src/harry/generators/RngUtils.java b/harry-core/src/harry/generators/RngUtils.java index 749cf7f..894204f 100644 --- a/harry-core/src/harry/generators/RngUtils.java +++ b/harry-core/src/harry/generators/RngUtils.java @@ -22,8 +22,12 @@ import java.util.function.LongSupplier; public class RngUtils { + private static final long CONSTANT = 0x2545F4914F6CDD1DL; public static long next(long input) { + if (input == 0) + return next(CONSTANT); + return xorshift64star(input); } @@ -32,7 +36,7 @@ public class RngUtils input ^= input >> 12; input ^= input << 25; // b input ^= input >> 27; // c - return input * 0x2545F4914F6CDD1DL; + return input * CONSTANT; } public static long[] next(long current, int n) diff --git a/harry-core/src/harry/generators/Surjections.java b/harry-core/src/harry/generators/Surjections.java index e5a937c..13f66ff 100644 --- a/harry-core/src/harry/generators/Surjections.java +++ b/harry-core/src/harry/generators/Surjections.java @@ -26,7 +26,7 @@ import java.util.function.Function; import java.util.function.LongFunction; import java.util.function.Supplier; -import com.google.common.annotations.VisibleForTesting; +import harry.core.VisibleForTesting; public class Surjections { diff --git 
a/harry-core/src/harry/model/AlwaysSamePartitionSelector.java b/harry-core/src/harry/model/AlwaysSamePartitionSelector.java new file mode 100644 index 0000000..43160fb --- /dev/null +++ b/harry-core/src/harry/model/AlwaysSamePartitionSelector.java @@ -0,0 +1,69 @@ +package harry.model; + +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.annotation.JsonTypeName; +import harry.core.Configuration; + +/** + * A simple test-only descriptor selector that can used for testing things where you only need one partition + */ +public class AlwaysSamePartitionSelector extends OpSelectors.PdSelector +{ + private final long pd; + + public AlwaysSamePartitionSelector(long pd) + { + this.pd = pd; + } + + protected long pd(long lts) + { + return 0; + } + + public long nextLts(long lts) + { + return lts + 1; + } + + public long prevLts(long lts) + { + return lts - 1; + } + + public long maxLtsFor(long pd) + { + return 1000; + } + + public long minLtsAt(long position) + { + return 0; + } + + public long minLtsFor(long pd) + { + return 0; + } + + public long positionFor(long lts) + { + return 0; + } + + @JsonTypeName("always_same") + public static class AlwaysSamePartitionSelectorConfiguration implements Configuration.PDSelectorConfiguration + { + private final long pd; + + public AlwaysSamePartitionSelectorConfiguration(@JsonProperty("pd") long pd) + { + this.pd = pd; + } + + public OpSelectors.PdSelector make(OpSelectors.Rng rng) + { + return new AlwaysSamePartitionSelector(pd); + } + } +} diff --git a/harry-core/src/harry/model/NoOpChecker.java b/harry-core/src/harry/model/NoOpChecker.java index 4cf4606..a13b6ec 100644 --- a/harry-core/src/harry/model/NoOpChecker.java +++ b/harry-core/src/harry/model/NoOpChecker.java @@ -34,6 +34,7 @@ public class NoOpChecker implements Model public void validate(Query query) { run.sut.execute(query.toSelectStatement(), - SystemUnderTest.ConsistencyLevel.ALL); + // TODO: make it configurable + SystemUnderTest.ConsistencyLevel.QUORUM); } } diff --git a/harry-core/src/harry/model/OpSelectors.java b/harry-core/src/harry/model/OpSelectors.java index 3adbb95..3dd4a25 100644 --- a/harry-core/src/harry/model/OpSelectors.java +++ b/harry-core/src/harry/model/OpSelectors.java @@ -22,9 +22,8 @@ import java.util.EnumMap; import java.util.List; import java.util.Map; -import com.google.common.annotations.VisibleForTesting; - import harry.core.Configuration; +import harry.core.VisibleForTesting; import harry.ddl.ColumnSpec; import harry.ddl.SchemaSpec; import harry.generators.Bytes; @@ -34,6 +33,7 @@ import harry.generators.Surjections; import harry.generators.distribution.Distribution; import harry.util.BitSet; +import static harry.generators.DataGenerators.NIL_DESCR; import static harry.generators.DataGenerators.UNSET_DESCR; /** @@ -179,18 +179,20 @@ public interface OpSelectors public long[] vds(long pd, long cd, long lts, long opId, SchemaSpec schema) { - return descriptors(pd, cd, lts, opId, schema.regularColumns, schema.regularColumnsMask(), schema.regularColumnsOffset); + BitSet setColumns = columnMask(pd, lts, opId); + return descriptors(pd, cd, lts, opId, schema.regularColumns, schema.regularColumnsMask(), setColumns, schema.regularColumnsOffset); } public long[] sds(long pd, long cd, long lts, long opId, SchemaSpec schema) { - return descriptors(pd, cd, lts, opId, schema.staticColumns, schema.staticColumnsMask(), schema.staticColumnsOffset); + BitSet setColumns = columnMask(pd, lts, opId); + return descriptors(pd, cd, lts, opId, 
schema.staticColumns, schema.staticColumnsMask(), setColumns, schema.staticColumnsOffset); } - public long[] descriptors(long pd, long cd, long lts, long opId, List<ColumnSpec<?>> columns, BitSet mask, int offset) + private long[] descriptors(long pd, long cd, long lts, long opId, List<ColumnSpec<?>> columns, BitSet mask, BitSet setColumns, int offset) { + assert opId < opsPerModification(lts) * numberOfModifications(lts) : String.format("Operation id %d exceeds the maximum expected number of operations %d", opId, opsPerModification(lts) * numberOfModifications(lts)); long[] descriptors = new long[columns.size()]; - BitSet setColumns = columnMask(pd, cd, opId); for (int i = 0; i < descriptors.length; i++) { @@ -199,6 +201,9 @@ public interface OpSelectors { ColumnSpec<?> spec = columns.get(i); long vd = vd(pd, cd, lts, opId, col) & Bytes.bytePatternFor(spec.type.maxSize()); + assert vd != UNSET_DESCR : "Ambiguous unset descriptor generated for the value"; + assert vd != NIL_DESCR : "Ambiguous nil descriptor generated for the value"; + descriptors[i] = vd; } else @@ -387,13 +392,13 @@ public interface OpSelectors switch (type) { + case UPDATE_WITH_STATICS: case DELETE_COLUMN_WITH_STATICS: gen = (descriptor) -> { long counter = 0; while (counter <= 100) { BitSet bitSet = orig.inflate(descriptor); - if ((schema.regularColumns.isEmpty() || !bitSet.allUnset(schema.regularColumnsMask)) && (schema.staticColumns.isEmpty() || !bitSet.allUnset(schema.staticColumnsMask))) return bitSet; @@ -404,6 +409,23 @@ public interface OpSelectors throw new RuntimeException(String.format("Could not generate a value after %d attempts.", counter)); }; break; + // Can not have an UPDATE statement without anything to update + case UPDATE: + gen = descriptor -> { + long counter = 0; + while (counter <= 100) + { + BitSet bitSet = orig.inflate(descriptor); + + if (!bitSet.allUnset(schema.regularColumnsMask)) + return bitSet; + + descriptor = RngUtils.next(descriptor); + counter++; + } + throw new RuntimeException(String.format("Could not generate a value after %d attempts.", counter)); + }; + break; case DELETE_COLUMN: gen = (descriptor) -> { long counter = 0; @@ -429,7 +451,7 @@ public interface OpSelectors public ColumnSelectorBuilder forWrite(Surjections.Surjection<BitSet> gen) { - m.put(OperationKind.WRITE, gen); + m.put(OperationKind.INSERT, gen); return this; } @@ -547,8 +569,8 @@ public interface OpSelectors protected final static long BITSET_IDX_STREAM = 0x92eb607bef1L; public static OperationSelector DEFAULT_OP_SELECTOR = OperationSelector.weighted(Surjections.weights(45, 45, 3, 2, 2, 1, 1, 1), - OperationKind.WRITE, - OperationKind.WRITE_WITH_STATICS, + OperationKind.INSERT, + OperationKind.INSERT_WITH_STATICS, OperationKind.DELETE_ROW, OperationKind.DELETE_COLUMN, OperationKind.DELETE_COLUMN_WITH_STATICS, @@ -649,7 +671,8 @@ public interface OpSelectors public OperationKind operationType(long pd, long lts, long opId) { - return operationType(pd, lts, opId, partitionLevelOperationsMask(pd, lts)); + OperationKind kind = operationType(pd, lts, opId, partitionLevelOperationsMask(pd, lts)); + return kind; } // TODO: create this bitset once per lts @@ -666,7 +689,8 @@ public interface OpSelectors public OperationKind operationType(long pd, long lts, long opId, BitSet partitionLevelOperationsMask) { - return operationSelector.inflate(pd ^ lts ^ opId, partitionLevelOperationsMask.isSet((int) opId)); + long descriptor = rng.randomNumber(pd ^ lts ^ opId, BITSET_IDX_STREAM); + return 
operationSelector.inflate(descriptor, partitionLevelOperationsMask.isSet((int) opId)); } public BitSet columnMask(long pd, long lts, long opId) @@ -688,8 +712,10 @@ public interface OpSelectors public enum OperationKind { - WRITE(false), - WRITE_WITH_STATICS(true), + UPDATE(false), + INSERT(false), + UPDATE_WITH_STATICS(true), + INSERT_WITH_STATICS(true), DELETE_PARTITION(true), DELETE_ROW(false), DELETE_COLUMN(false), diff --git a/harry-core/src/harry/model/SelectHelper.java b/harry-core/src/harry/model/SelectHelper.java index 70d1eb2..fc8f6f7 100644 --- a/harry-core/src/harry/model/SelectHelper.java +++ b/harry-core/src/harry/model/SelectHelper.java @@ -19,13 +19,9 @@ package harry.model; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collections; import java.util.List; -import com.datastax.driver.core.querybuilder.Ordering; -import com.datastax.driver.core.querybuilder.QueryBuilder; -import com.datastax.driver.core.querybuilder.Select; import harry.data.ResultSetRow; import harry.ddl.ColumnSpec; import harry.ddl.SchemaSpec; @@ -48,67 +44,119 @@ public class SelectHelper */ public static CompiledStatement select(SchemaSpec schema, long pd, List<Relation> relations, boolean reverse, boolean includeWriteTime) { - Select.Selection select = QueryBuilder.select(); - for (ColumnSpec<?> column : schema.allColumns) - select.column(column.name); + StringBuilder b = new StringBuilder(); + b.append("SELECT "); + + for (int i = 0; i < schema.allColumns.size(); i++) + { + ColumnSpec<?> spec = schema.allColumns.get(i); + if (i > 0) + b.append(", "); + b.append(spec.name); + } if (includeWriteTime) { for (ColumnSpec<?> column : schema.staticColumns) - select.writeTime(column.name); + b.append(", ") + .append("writetime(") + .append(column.name) + .append(")"); for (ColumnSpec<?> column : schema.regularColumns) - select.writeTime(column.name); + b.append(", ") + .append("writetime(") + .append(column.name) + .append(")"); } - Select.Where where = select.from(schema.keyspace, schema.table).where(); - List<Object> bindings = new ArrayList<>(); + b.append(" FROM ") + .append(schema.keyspace) + .append(".") + .append(schema.table) + .append(" WHERE "); - addRelations(schema, where, bindings, pd, relations); - addOrderBy(schema, where, reverse); + List<Object> bindings = new ArrayList<>(); + schema.inflateRelations(pd, + relations, + new SchemaSpec.AddRelationCallback() + { + boolean isFirst = true; + public void accept(ColumnSpec<?> spec, Relation.RelationKind kind, Object value) + { + if (isFirst) + isFirst = false; + else + b.append(" AND "); + b.append(kind.getClause(spec)); + bindings.add(value); + } + }); + addOrderBy(schema, b, reverse); + b.append(";"); Object[] bindingsArr = bindings.toArray(new Object[bindings.size()]); - return new CompiledStatement(where.toString(), bindingsArr); + return new CompiledStatement(b.toString(), bindingsArr); } public static CompiledStatement count(SchemaSpec schema, long pd) { - Select.Selection select = QueryBuilder.select(); - select.countAll(); - - Select.Where where = select.from(schema.keyspace, schema.table).where(); - List<Object> bindings = new ArrayList<>(schema.partitionKeys.size()); + StringBuilder b = new StringBuilder(); + b.append("SELECT count(*) "); - addRelations(schema, where, bindings, pd, Collections.emptyList()); + b.append(" FROM ") + .append(schema.keyspace) + .append(".") + .append(schema.table) + .append(" WHERE "); - Object[] bindingsArr = bindings.toArray(new Object[bindings.size()]); - return new 
CompiledStatement(where.toString(), bindingsArr); - } + List<Object> bindings = new ArrayList<>(schema.partitionKeys.size()); - private static void addRelations(SchemaSpec schema, Select.Where where, List<Object> bindings, long pd, List<Relation> relations) - { schema.inflateRelations(pd, - relations, - (spec, kind, value) -> { - where.and(kind.getClause(spec)); - bindings.add(value); + Collections.emptyList(), + new SchemaSpec.AddRelationCallback() + { + boolean isFirst = true; + public void accept(ColumnSpec<?> spec, Relation.RelationKind kind, Object value) + { + if (isFirst) + isFirst = false; + else + b.append(" AND "); + b.append(kind.getClause(spec)); + bindings.add(value); + } }); + + Object[] bindingsArr = bindings.toArray(new Object[bindings.size()]); + return new CompiledStatement(b.toString(), bindingsArr); } - private static void addOrderBy(SchemaSpec schema, Select.Where whereClause, boolean reverse) + private static void addOrderBy(SchemaSpec schema, StringBuilder b, boolean reverse) { if (reverse && schema.clusteringKeys.size() > 0) { - Ordering[] ordering = new Ordering[schema.clusteringKeys.size()]; + b.append(" ORDER BY "); for (int i = 0; i < schema.clusteringKeys.size(); i++) { ColumnSpec<?> c = schema.clusteringKeys.get(i); - ordering[i] = c.isReversed() ? QueryBuilder.asc(c.name) : QueryBuilder.desc(c.name); + if (i > 0) + b.append(", "); + b.append(c.isReversed() ? asc(c.name) : desc(c.name)); } - whereClause.orderBy(ordering); } } + public static String asc(String name) + { + return name + " ASC"; + } + + public static String desc(String name) + { + return name + " DESC"; + } + public static ResultSetRow resultSetToRow(SchemaSpec schema, OpSelectors.MonotonicClock clock, Object[] result) { Object[] partitionKey = new Object[schema.partitionKeys.size()]; diff --git a/harry-core/src/harry/model/clock/ApproximateMonotonicClock.java b/harry-core/src/harry/model/clock/ApproximateMonotonicClock.java index 59dd47e..1a64500 100644 --- a/harry-core/src/harry/model/clock/ApproximateMonotonicClock.java +++ b/harry-core/src/harry/model/clock/ApproximateMonotonicClock.java @@ -25,9 +25,8 @@ import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicLongArray; import java.util.concurrent.locks.LockSupport; -import com.google.common.annotations.VisibleForTesting; - import harry.core.Configuration; +import harry.core.VisibleForTesting; import harry.model.OpSelectors; /** diff --git a/harry-core/src/harry/model/clock/OffsetClock.java b/harry-core/src/harry/model/clock/OffsetClock.java index 9f40a64..8c25394 100644 --- a/harry-core/src/harry/model/clock/OffsetClock.java +++ b/harry-core/src/harry/model/clock/OffsetClock.java @@ -20,6 +20,9 @@ package harry.model.clock; import java.util.concurrent.atomic.AtomicLong; +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.annotation.JsonTypeName; import harry.core.Configuration; import harry.model.OpSelectors; @@ -58,4 +61,21 @@ public class OffsetClock implements OpSelectors.MonotonicClock { throw new RuntimeException("not implemented"); } + + @JsonTypeName("offset") + public static class OffsetClockConfiguration implements Configuration.ClockConfiguration + { + public final long offset; + + @JsonCreator + public OffsetClockConfiguration(@JsonProperty("offset") int offset) + { + this.offset = offset; + } + + public OpSelectors.MonotonicClock make() + { + return new OffsetClock(offset); + } + } } diff --git 
a/harry-core/src/harry/model/sut/PrintlnSut.java b/harry-core/src/harry/model/sut/PrintlnSut.java index ad14b0b..f1b310e 100644 --- a/harry-core/src/harry/model/sut/PrintlnSut.java +++ b/harry-core/src/harry/model/sut/PrintlnSut.java @@ -21,6 +21,10 @@ package harry.model.sut; import java.util.Arrays; import java.util.concurrent.CompletableFuture; +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonTypeName; +import harry.core.Configuration; + public class PrintlnSut implements SystemUnderTest { public boolean isShutdown() @@ -46,4 +50,18 @@ public class PrintlnSut implements SystemUnderTest return CompletableFuture.supplyAsync(() -> execute(statement, cl, bindings), Runnable::run); } + + @JsonTypeName("println") + public static class PrintlnSutConfiguration implements Configuration.SutConfiguration + { + @JsonCreator + public PrintlnSutConfiguration() + { + + } + public SystemUnderTest make() + { + return new PrintlnSut(); + } + } } diff --git a/harry-core/src/harry/operations/DeleteHelper.java b/harry-core/src/harry/operations/DeleteHelper.java index 6f04bad..f1b6983 100644 --- a/harry-core/src/harry/operations/DeleteHelper.java +++ b/harry-core/src/harry/operations/DeleteHelper.java @@ -23,11 +23,8 @@ import java.util.Collections; import java.util.List; import java.util.function.IntConsumer; -import com.datastax.driver.core.querybuilder.Delete; -import com.datastax.driver.core.querybuilder.QueryBuilder; import harry.ddl.ColumnSpec; import harry.ddl.SchemaSpec; -import harry.runner.LoggingPartitionVisitor; import harry.util.BitSet; public class DeleteHelper @@ -130,40 +127,50 @@ public class DeleteHelper BitSet mask, long ts) { - Delete delete; - if (columnsToDelete == null) - delete = QueryBuilder.delete().from(schema.keyspace, schema.table); - else + StringBuilder b = new StringBuilder(); + b.append("DELETE "); + if (columnsToDelete != null) { assert mask != null; assert relations == null || relations.stream().allMatch((r) -> r.kind == Relation.RelationKind.EQ); - delete = QueryBuilder.delete(columnNames(schema.allColumns, columnsToDelete, mask)) - .from(schema.keyspace, schema.table); + String[] names = columnNames(schema.allColumns, columnsToDelete, mask); + for (int i = 0; i < names.length; i++) + { + if (i > 0) + b.append(", "); + b.append(names[i]); + } + b.append(" "); } + b.append("FROM ") + .append(schema.keyspace).append(".").append(schema.table) + .append(" USING TIMESTAMP ") + .append(ts) + .append(" WHERE "); - Delete.Where where = delete.where(); List<Object> bindings = new ArrayList<>(); - addRelations(schema, where, bindings, pd, relations); - delete.using(QueryBuilder.timestamp(ts)); - delete.setForceNoValues(true); - Object[] bindingsArr = bindings.toArray(new Object[bindings.size()]); - String compiled = delete.getQueryString(); - if (compiled.contains("built query (could not generate with default codec registry:")) - throw new IllegalArgumentException(String.format("Could not generate the query: %s. 
Bindings: (%s)", - delete, - CompiledStatement.bindingsToString(bindingsArr))); - return new CompiledStatement(compiled, bindingsArr); - } - - private static void addRelations(SchemaSpec schema, Delete.Where where, List<Object> bindings, long pd, List<Relation> relations) - { schema.inflateRelations(pd, relations, - (spec, kind, value) -> { - where.and(kind.getClause(spec)); - bindings.add(value); + new SchemaSpec.AddRelationCallback() + { + boolean isFirst = true; + public void accept(ColumnSpec<?> spec, Relation.RelationKind kind, Object value) + { + if (isFirst) + isFirst = false; + else + b.append(" AND "); + b.append(kind.getClause(spec)); + bindings.add(value); + } }); + + b.append(";"); + + Object[] bindingsArr = bindings.toArray(new Object[bindings.size()]); + + return new CompiledStatement(b.toString(), bindingsArr); } private static String[] columnNames(List<ColumnSpec<?>> columns, BitSet selectedColumns, BitSet mask) diff --git a/harry-core/src/harry/operations/Relation.java b/harry-core/src/harry/operations/Relation.java index 19db0b4..87487d9 100644 --- a/harry-core/src/harry/operations/Relation.java +++ b/harry-core/src/harry/operations/Relation.java @@ -21,16 +21,8 @@ package harry.operations; import java.util.ArrayList; import java.util.List; -import com.datastax.driver.core.querybuilder.Clause; import harry.ddl.ColumnSpec; -import static com.datastax.driver.core.querybuilder.QueryBuilder.bindMarker; -import static com.datastax.driver.core.querybuilder.QueryBuilder.eq; -import static com.datastax.driver.core.querybuilder.QueryBuilder.gt; -import static com.datastax.driver.core.querybuilder.QueryBuilder.gte; -import static com.datastax.driver.core.querybuilder.QueryBuilder.lt; -import static com.datastax.driver.core.querybuilder.QueryBuilder.lte; - public class Relation { public final RelationKind kind; @@ -62,9 +54,9 @@ public class Relation return columnSpec.name; } - public Clause toClause() + public String toClause() { - return kind.getClause(column(), bindMarker()); + return kind.getClause(column()); } public String toString() @@ -101,7 +93,7 @@ public class Relation public static void addRelation(long[] key, List<ColumnSpec<?>> columnSpecs, List<Relation> relations, RelationKind kind) { assert key.length == columnSpecs.size() : - String.format("Key size (%d) should equal to column spec size (%d)", key.length, columnSpecs.size()); + String.format("Key size (%d) should equal to column spec size (%d). 
Specs: %s", key.length, columnSpecs.size(), columnSpecs); for (int i = 0; i < key.length; i++) { ColumnSpec<?> spec = columnSpecs.get(i); @@ -113,17 +105,6 @@ public class Relation { LT { - @Override - public Clause getClause(String name, Object obj) - { - return lt(name, obj); - } - - public Clause getClause(List<String> name, List<Object> obj) - { - return lt(name, obj); - } - public boolean isNegatable() { return true; @@ -156,17 +137,6 @@ public class Relation }, GT { - @Override - public Clause getClause(String name, Object obj) - { - return gt(name, obj); - } - - public Clause getClause(List<String> name, List<Object> obj) - { - return gt(name, obj); - } - public boolean isNegatable() { return true; @@ -199,17 +169,6 @@ public class Relation }, LTE { - @Override - public Clause getClause(String name, Object obj) - { - return lte(name, obj); - } - - public Clause getClause(List<String> name, List<Object> obj) - { - return lt(name, obj); - } - public boolean isNegatable() { return true; @@ -242,17 +201,6 @@ public class Relation }, GTE { - @Override - public Clause getClause(String name, Object obj) - { - return gte(name, obj); - } - - public Clause getClause(List<String> name, List<Object> obj) - { - return gte(name, obj); - } - public boolean isNegatable() { return true; @@ -285,17 +233,6 @@ public class Relation }, EQ { - @Override - public Clause getClause(String name, Object obj) - { - return eq(name, obj); - } - - public Clause getClause(List<String> name, List<Object> obj) - { - return eq(name, obj); - } - public boolean isNegatable() { return false; @@ -329,14 +266,15 @@ public class Relation public abstract boolean match(LongComparator comparator, long l, long r); - public abstract Clause getClause(String name, Object obj); - - public Clause getClause(ColumnSpec<?> spec) + public String getClause(String name) { - return getClause(spec.name, bindMarker()); + return String.format("%s %s ?", name, toString()); } - public abstract Clause getClause(List<String> name, List<Object> obj); + public String getClause(ColumnSpec<?> spec) + { + return getClause(spec.name); + } public abstract boolean isNegatable(); diff --git a/harry-core/src/harry/operations/WriteHelper.java b/harry-core/src/harry/operations/WriteHelper.java index a0b7565..084f931 100644 --- a/harry-core/src/harry/operations/WriteHelper.java +++ b/harry-core/src/harry/operations/WriteHelper.java @@ -18,21 +18,13 @@ package harry.operations; +import java.util.Arrays; import java.util.List; import harry.ddl.ColumnSpec; import harry.ddl.SchemaSpec; import harry.generators.DataGenerators; -import static com.datastax.driver.core.querybuilder.QueryBuilder.bindMarker; -import static com.datastax.driver.core.querybuilder.QueryBuilder.eq; -import static com.datastax.driver.core.querybuilder.QueryBuilder.in; -import static com.datastax.driver.core.querybuilder.QueryBuilder.insertInto; -import static com.datastax.driver.core.querybuilder.QueryBuilder.set; -import static com.datastax.driver.core.querybuilder.QueryBuilder.timestamp; -import static com.datastax.driver.core.querybuilder.QueryBuilder.truncate; -import static com.datastax.driver.core.querybuilder.QueryBuilder.update; - public class WriteHelper { public static CompiledStatement inflateInsert(SchemaSpec schema, @@ -48,117 +40,127 @@ public class WriteHelper Object[] regularColumns = schema.inflateRegularColumns(vds); Object[] bindings = new Object[schema.allColumns.size()]; - int bindingsCount = 0; - com.datastax.driver.core.querybuilder.Insert insert = 
insertInto(schema.keyspace, - schema.table); - bindingsCount += addValue(insert, bindings, schema.partitionKeys, partitionKey, bindingsCount); - bindingsCount += addValue(insert, bindings, schema.clusteringKeys, clusteringKey, bindingsCount); + StringBuilder b = new StringBuilder(); + b.append("INSERT INTO ") + .append(schema.keyspace) + .append('.') + .append(schema.table) + .append(" ("); + + int bindingsCount = 0; + bindingsCount += appendStatements(b, bindings, schema.partitionKeys, partitionKey, bindingsCount, true, ",", "%s"); + bindingsCount += appendStatements(b, bindings, schema.clusteringKeys, clusteringKey, bindingsCount, false, ",", "%s"); + bindingsCount += appendStatements(b, bindings, schema.regularColumns, regularColumns, bindingsCount, false, ",", "%s"); if (staticColumns != null) - bindingsCount += addValue(insert, bindings, schema.staticColumns, staticColumns, bindingsCount); - bindingsCount += addValue(insert, bindings, schema.regularColumns, regularColumns, bindingsCount); + bindingsCount += appendStatements(b, bindings, schema.staticColumns, staticColumns, bindingsCount, false, ",", "%s"); - insert.using(timestamp(timestamp)); + b.append(") VALUES ("); - // Some of the values were unset - if (bindingsCount != bindings.length) + for (int i = 0; i < bindingsCount; i++) { - Object[] tmp = new Object[bindingsCount]; - System.arraycopy(bindings, 0, tmp, 0, bindingsCount); - bindings = tmp; + if (i > 0) + b.append(", "); + b.append("?"); } - return CompiledStatement.create(insert.toString(), bindings); + b.append(") USING TIMESTAMP ") + .append(timestamp); + + return new CompiledStatement(b.toString(), adjustArraySize(bindings, bindingsCount)); } - public static boolean allUnset(long[] descriptors) + public static Object[] adjustArraySize(Object[] bindings, int bindingsCount) { - for (long descriptor : descriptors) + if (bindingsCount != bindings.length) { - if (descriptor != DataGenerators.UNSET_DESCR) - return false; + Object[] tmp = new Object[bindingsCount]; + System.arraycopy(bindings, 0, tmp, 0, bindingsCount); + bindings = tmp; } - return true; - } - private static int addValue(com.datastax.driver.core.querybuilder.Insert insert, - Object[] bindings, - List<ColumnSpec<?>> columns, - Object[] data, - int bound) - { - assert data.length == columns.size(); - - int bindingsCount = 0; - for (int i = 0; i < data.length; i++) - { - if (data[i] == DataGenerators.UNSET_VALUE) - continue; - - insert.value(columns.get(i).name, bindMarker()); - bindings[bound + bindingsCount] = data[i]; - bindingsCount++; - } - - return bindingsCount; + return bindings; } public static CompiledStatement inflateUpdate(SchemaSpec schema, long pd, long cd, long[] vds, + long[] sds, long timestamp) { Object[] partitionKey = schema.inflatePartitionKey(pd); Object[] clusteringKey = schema.inflateClusteringKey(cd); + Object[] staticColumns = sds == null ? 
null : schema.inflateStaticColumns(sds); Object[] regularColumns = schema.inflateRegularColumns(vds); Object[] bindings = new Object[schema.allColumns.size()]; - int bindingsCount = 0; - com.datastax.driver.core.querybuilder.Update update = update(schema.keyspace, - schema.table); - bindingsCount += addWith(update, bindings, schema.regularColumns, regularColumns, bindingsCount); - bindingsCount += addWhere(update, bindings, schema.partitionKeys, partitionKey, bindingsCount); - bindingsCount += addWhere(update, bindings, schema.clusteringKeys, clusteringKey, bindingsCount); + StringBuilder b = new StringBuilder(); + b.append("UPDATE ") + .append(schema.keyspace) + .append('.') + .append(schema.table) + .append(" USING TIMESTAMP ") + .append(timestamp) + .append(" SET "); - update.using(timestamp(timestamp)); - // TODO: TTL - // ttl.ifPresent(ts -> update.using(ttl(ts))); + int bindingsCount = 0; + bindingsCount += addSetStatements(b, bindings, schema.regularColumns, regularColumns, bindingsCount); + if (staticColumns != null) + bindingsCount += addSetStatements(b, bindings, schema.staticColumns, staticColumns, bindingsCount); + + assert bindingsCount > 0 : "Can not have an UPDATE statement without any updates"; + b.append(" WHERE "); - return CompiledStatement.create(update.toString(), bindings); + bindingsCount += addWhereStatements(b, bindings, schema.partitionKeys, partitionKey, bindingsCount, true); + bindingsCount += addWhereStatements(b, bindings, schema.clusteringKeys, clusteringKey, bindingsCount, false); + b.append(";"); + return new CompiledStatement(b.toString(), adjustArraySize(bindings, bindingsCount)); } - private static int addWith(com.datastax.driver.core.querybuilder.Update update, - Object[] bindings, - List<ColumnSpec<?>> columns, - Object[] data, - int bound) + private static int addSetStatements(StringBuilder b, + Object[] bindings, + List<ColumnSpec<?>> columns, + Object[] values, + int bound) { - assert data.length == columns.size(); - - for (int i = 0; i < data.length; i++) - { - update.with(set(columns.get(i).name, bindMarker())); - bindings[bound + i] = data[i]; - } - - return data.length; + return appendStatements(b, bindings, columns, values, bound, bound == 0, ", ", "%s = ?"); } - private static int addWhere(com.datastax.driver.core.querybuilder.Update update, - Object[] bindings, - List<ColumnSpec<?>> columns, - Object[] data, - int bound) + private static int addWhereStatements(StringBuilder b, + Object[] bindings, + List<ColumnSpec<?>> columns, + Object[] values, + int bound, + boolean firstStatement) { - assert data.length == columns.size(); + return appendStatements(b, bindings, columns, values, bound, firstStatement, " AND ", "%s = ?"); + } - for (int i = 0; i < data.length; i++) + private static int appendStatements(StringBuilder b, + Object[] allBindings, + List<ColumnSpec<?>> columns, + Object[] values, + int bound, + boolean firstStatement, + String separator, + String nameFormatter) + { + int bindingsCount = 0; + for (int i = 0; i < values.length; i++) { - update.where().and(eq(columns.get(i).name, bindMarker())); - bindings[bound + i] = data[i]; - } + Object value = values[i]; + if (value == DataGenerators.UNSET_VALUE) + continue; + + ColumnSpec<?> column = columns.get(i); + if (bindingsCount > 0 || !firstStatement) + b.append(separator); - return data.length; + b.append(String.format(nameFormatter, column.name)); + allBindings[bound + bindingsCount] = value; + bindingsCount++; + } + return bindingsCount; } } \ No newline at end of file diff --git 
a/harry-core/src/harry/reconciler/Reconciler.java b/harry-core/src/harry/reconciler/Reconciler.java index ca4772c..a9709b3 100644 --- a/harry-core/src/harry/reconciler/Reconciler.java +++ b/harry-core/src/harry/reconciler/Reconciler.java @@ -125,8 +125,10 @@ public class Reconciler hadPartitionDeletion = true; break; - case WRITE_WITH_STATICS: - case WRITE: + case INSERT_WITH_STATICS: + case INSERT: + case UPDATE: + case UPDATE_WITH_STATICS: if (debugCd != -1 && cd == debugCd) logger.info("Writing {} ({}) at {}/{}", cd, opType, lts, opId); writes.add(opId); @@ -164,12 +166,14 @@ public class Reconciler switch (opType) { - case WRITE_WITH_STATICS: + case INSERT_WITH_STATICS: + case UPDATE_WITH_STATICS: // We could apply static columns during the first iteration, but it's more convenient // to reconcile static-level deletions. partitionState.writeStaticRow(descriptorSelector.sds(pd, cd, lts, opId, schema), lts); - case WRITE: + case INSERT: + case UPDATE: if (!query.match(cd)) { if (debugCd != -1 && cd == debugCd) @@ -189,7 +193,8 @@ public class Reconciler partitionState.write(cd, descriptorSelector.vds(pd, cd, lts, opId, schema), - lts); + lts, + opType == OpSelectors.OperationKind.INSERT || opType == OpSelectors.OperationKind.INSERT_WITH_STATICS); break; default: throw new IllegalStateException(); @@ -206,7 +211,8 @@ public class Reconciler switch (opType) { case DELETE_COLUMN_WITH_STATICS: - partitionState.deleteStaticColumns(schema.staticColumnsOffset, + partitionState.deleteStaticColumns(lts, + schema.staticColumnsOffset, descriptorSelector.columnMask(pd, lts, opId), schema.staticColumnsMask()); case DELETE_COLUMN: @@ -227,7 +233,8 @@ public class Reconciler } } - partitionState.deleteRegularColumns(cd, + partitionState.deleteRegularColumns(lts, + cd, schema.regularColumnsOffset, descriptorSelector.columnMask(pd, lts, opId), schema.regularColumnsMask()); @@ -270,14 +277,15 @@ public class Reconciler long lts) { if (staticRow != null) - staticRow = updateRowState(staticRow, schema.staticColumns, STATIC_CLUSTERING, staticVds, lts); + staticRow = updateRowState(staticRow, schema.staticColumns, STATIC_CLUSTERING, staticVds, lts, false); } private void write(long cd, long[] vds, - long lts) + long lts, + boolean writeParimaryKeyLiveness) { - rows.compute(cd, (cd_, current) -> updateRowState(current, schema.regularColumns, cd, vds, lts)); + rows.compute(cd, (cd_, current) -> updateRowState(current, schema.regularColumns, cd, vds, lts, writeParimaryKeyLiveness)); } private void delete(Ranges.Range range, @@ -304,14 +312,19 @@ public class Reconciler private void delete(long cd, long lts) { - rows.remove(cd); + RowState state = rows.remove(cd); + if (state != null) + { + for (long v : state.lts) + assert lts >= v : String.format("Attempted to remove a row with a tombstone that has older timestamp (%d): %s", lts, state); + } } public boolean isEmpty() { return rows.isEmpty(); } - private RowState updateRowState(RowState currentState, List<ColumnSpec<?>> columns, long cd, long[] vds, long lts) + private RowState updateRowState(RowState currentState, List<ColumnSpec<?>> columns, long cd, long[] vds, long lts, boolean writePrimaryKeyLiveness) { if (currentState == null) { @@ -359,24 +372,30 @@ public class Reconciler } } + if (writePrimaryKeyLiveness) + currentState.hasPrimaryKeyLivenessInfo = true; + return currentState; } - private void deleteRegularColumns(long cd, int columnOffset, BitSet columns, BitSet mask) + private void deleteRegularColumns(long lts, long cd, int columnOffset, BitSet 
columns, BitSet mask) { - deleteColumns(rows.get(cd), columnOffset, columns, mask); + deleteColumns(lts, rows.get(cd), columnOffset, columns, mask); } - private void deleteStaticColumns(int columnOffset, BitSet columns, BitSet mask) + private void deleteStaticColumns(long lts, int columnOffset, BitSet columns, BitSet mask) { - deleteColumns(staticRow, columnOffset, columns, mask); + deleteColumns(lts, staticRow, columnOffset, columns, mask); } - private void deleteColumns(RowState state, int columnOffset, BitSet columns, BitSet mask) + private void deleteColumns(long lts, RowState state, int columnOffset, BitSet columns, BitSet mask) { if (state == null) return; + //TODO: optimise by iterating over the columns that were removed by this deletion + //TODO: optimise final decision to fully remove the column by counting a number of set/unset columns + boolean allNil = true; for (int i = 0; i < state.vds.length; i++) { if (columns.isSet(columnOffset + i, mask)) @@ -384,7 +403,14 @@ public class Reconciler state.vds[i] = NIL_DESCR; state.lts[i] = NO_TIMESTAMP; } + else if (state.vds[i] != NIL_DESCR) + { + allNil = false; + } } + + if (state.cd != STATIC_CLUSTERING && allNil & !state.hasPrimaryKeyLivenessInfo) + delete(state.cd, lts); } private void deletePartition(long lts) @@ -449,6 +475,7 @@ public class Reconciler public static class RowState { + public boolean hasPrimaryKeyLivenessInfo = false; public final long cd; public final long[] vds; public final long[] lts; diff --git a/harry-core/src/harry/runner/CorruptingPartitionVisitor.java b/harry-core/src/harry/runner/CorruptingPartitionVisitor.java index ca5bb93..d079cf3 100644 --- a/harry-core/src/harry/runner/CorruptingPartitionVisitor.java +++ b/harry-core/src/harry/runner/CorruptingPartitionVisitor.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package harry.runner; import java.util.Random; diff --git a/harry-core/src/harry/runner/DataTracker.java b/harry-core/src/harry/runner/DataTracker.java index d5825fb..dab6fc9 100644 --- a/harry-core/src/harry/runner/DataTracker.java +++ b/harry-core/src/harry/runner/DataTracker.java @@ -18,6 +18,8 @@ package harry.runner; +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonTypeName; import harry.core.Configuration; public interface DataTracker @@ -35,6 +37,7 @@ public interface DataTracker } public static DataTracker NO_OP = new NoOpDataTracker(); + class NoOpDataTracker implements DataTracker { private NoOpDataTracker() {} diff --git a/harry-core/src/harry/runner/DefaultDataTracker.java b/harry-core/src/harry/runner/DefaultDataTracker.java index 1b55482..7b1412a 100644 --- a/harry-core/src/harry/runner/DefaultDataTracker.java +++ b/harry-core/src/harry/runner/DefaultDataTracker.java @@ -23,11 +23,11 @@ import java.util.List; import java.util.concurrent.PriorityBlockingQueue; import java.util.concurrent.atomic.AtomicLong; -import com.google.common.annotations.VisibleForTesting; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import harry.core.Configuration; +import harry.core.VisibleForTesting; public class DefaultDataTracker implements DataTracker { diff --git a/harry-core/src/harry/runner/MutatingPartitionVisitor.java b/harry-core/src/harry/runner/MutatingPartitionVisitor.java index 02aa6a1..4df793e 100644 --- a/harry-core/src/harry/runner/MutatingPartitionVisitor.java +++ b/harry-core/src/harry/runner/MutatingPartitionVisitor.java @@ -127,6 +127,7 @@ public class MutatingPartitionVisitor extends AbstractPartitionVisitor if (sut.isShutdown()) throw new IllegalStateException("System under test is shut down"); + // TODO: limit a number of retries sut.executeAsync(statement.cql(), SystemUnderTest.ConsistencyLevel.QUORUM, statement.bindings()) .whenComplete((res, t) -> { if (t != null) diff --git a/harry-core/src/harry/runner/MutatingRowVisitor.java b/harry-core/src/harry/runner/MutatingRowVisitor.java index b928df7..a14fc96 100644 --- a/harry-core/src/harry/runner/MutatingRowVisitor.java +++ b/harry-core/src/harry/runner/MutatingRowVisitor.java @@ -20,6 +20,7 @@ package harry.runner; import harry.core.MetricReporter; import harry.core.Run; +import harry.core.VisibleForTesting; import harry.ddl.SchemaSpec; import harry.model.OpSelectors; import harry.operations.CompiledStatement; @@ -37,21 +38,35 @@ public class MutatingRowVisitor implements Operation public MutatingRowVisitor(Run run) { - this.metricReporter = run.metricReporter; - this.schema = run.schemaSpec; - this.clock = run.clock; - this.descriptorSelector = run.descriptorSelector; - this.rangeSelector = run.rangeSelector; + this(run.schemaSpec, + run.clock, + run.descriptorSelector, + run.rangeSelector, + run.metricReporter); } - public CompiledStatement write(long lts, long pd, long cd, long opId) + @VisibleForTesting + public MutatingRowVisitor(SchemaSpec schema, + OpSelectors.MonotonicClock clock, + OpSelectors.DescriptorSelector descriptorSelector, + QueryGenerator rangeSelector, + MetricReporter metricReporter) + { + this.metricReporter = metricReporter; + this.schema = schema; + this.clock = clock; + this.descriptorSelector = descriptorSelector; + this.rangeSelector = rangeSelector; + } + + public CompiledStatement insert(long lts, long pd, long cd, long opId) { metricReporter.insert(); long[] vds = descriptorSelector.vds(pd, cd, lts, opId, schema); return 
WriteHelper.inflateInsert(schema, pd, cd, vds, null, clock.rts(lts)); } - public CompiledStatement writeWithStatics(long lts, long pd, long cd, long opId) + public CompiledStatement insertWithStatics(long lts, long pd, long cd, long opId) { metricReporter.insert(); long[] vds = descriptorSelector.vds(pd, cd, lts, opId, schema); @@ -59,6 +74,21 @@ public class MutatingRowVisitor implements Operation return WriteHelper.inflateInsert(schema, pd, cd, vds, sds, clock.rts(lts)); } + public CompiledStatement update(long lts, long pd, long cd, long opId) + { + metricReporter.insert(); + long[] vds = descriptorSelector.vds(pd, cd, lts, opId, schema); + return WriteHelper.inflateUpdate(schema, pd, cd, vds, null, clock.rts(lts)); + } + + public CompiledStatement updateWithStatics(long lts, long pd, long cd, long opId) + { + metricReporter.insert(); + long[] vds = descriptorSelector.vds(pd, cd, lts, opId, schema); + long[] sds = descriptorSelector.sds(pd, cd, lts, opId, schema); + return WriteHelper.inflateUpdate(schema, pd, cd, vds, sds, clock.rts(lts)); + } + public CompiledStatement deleteColumn(long lts, long pd, long cd, long opId) { metricReporter.columnDelete(); diff --git a/harry-core/src/harry/runner/Operation.java b/harry-core/src/harry/runner/Operation.java index e56be4c..f44f3bd 100644 --- a/harry-core/src/harry/runner/Operation.java +++ b/harry-core/src/harry/runner/Operation.java @@ -35,12 +35,16 @@ public interface Operation { // TODO: switch to EnumMap // TODO: pluggable capabilities; OperationKind can/should bear its own logic - case WRITE: - return write(lts, pd, cd, opId); + case INSERT: + return insert(lts, pd, cd, opId); + case UPDATE: + return update(lts, pd, cd, opId); case DELETE_ROW: return deleteRow(lts, pd, cd, opId); - case WRITE_WITH_STATICS: - return writeWithStatics(lts, pd, cd, opId); + case INSERT_WITH_STATICS: + return insertWithStatics(lts, pd, cd, opId); + case UPDATE_WITH_STATICS: + return updateWithStatics(lts, pd, cd, opId); case DELETE_PARTITION: return deletePartition(lts, pd, opId); case DELETE_COLUMN: @@ -56,7 +60,11 @@ public interface Operation } } - CompiledStatement write(long lts, long pd, long cd, long opId); + CompiledStatement insert(long lts, long pd, long cd, long opId); + CompiledStatement update(long lts, long pd, long cd, long opId); + + CompiledStatement insertWithStatics(long lts, long pd, long cd, long opId); + CompiledStatement updateWithStatics(long lts, long pd, long cd, long opId); CompiledStatement deleteColumn(long lts, long pd, long cd, long opId); @@ -66,8 +74,6 @@ public interface Operation CompiledStatement deletePartition(long lts, long pd, long opId); - CompiledStatement writeWithStatics(long lts, long pd, long cd, long opId); - CompiledStatement deleteRange(long lts, long pd, long opId); CompiledStatement deleteSlice(long lts, long pd, long opId); diff --git a/harry-core/src/harry/runner/QueryGenerator.java b/harry-core/src/harry/runner/QueryGenerator.java index 65b21ac..829cf8b 100644 --- a/harry-core/src/harry/runner/QueryGenerator.java +++ b/harry-core/src/harry/runner/QueryGenerator.java @@ -317,7 +317,7 @@ public class QueryGenerator // TODO: one of the ways to get rid of garbage here, and potentially even simplify the code is to // simply return bounds here. After bounds are created, we slice them and generate query right // from the bounds. In this case, we can even say that things like -inf/+inf are special values, - // and use them as placeholdrs. Also, it'll be easier to manipulate relations. 
+ // and use them as placeholders. Also, it'll be easier to manipulate relations. return new Query.ClusteringRangeQuery(Query.QueryKind.CLUSTERING_RANGE, pd, stitchedMin, diff --git a/harry-core/src/harry/util/BitSet.java b/harry-core/src/harry/util/BitSet.java index 3c70052..4dc8823 100644 --- a/harry-core/src/harry/util/BitSet.java +++ b/harry-core/src/harry/util/BitSet.java @@ -178,7 +178,7 @@ public interface BitSet public boolean isSet(int idx) { - assert idx < size(); + assert idx < size() : String.format("Trying to query the bit (%s) outside the range of bitset (%s)", idx, size()); return BitSet.isSet(bits, idx); } diff --git a/harry-core/src/harry/util/TestRunner.java b/harry-core/src/harry/util/TestRunner.java index 9abea62..4349fd2 100644 --- a/harry-core/src/harry/util/TestRunner.java +++ b/harry-core/src/harry/util/TestRunner.java @@ -44,19 +44,24 @@ public class TestRunner public static <T1, T2> void test(Generator<T1> gen1, Function<T1, Generator<T2>> gen2, - Consumer<T2> validate) + ThrowingConsumer<T2> validate) throws Throwable { test(gen1, (v1) -> test(gen2.apply(v1), validate)); } public static <T1> void test(Generator<T1> gen1, - Consumer<T1> validate) + ThrowingConsumer<T1> validate) throws Throwable { for (int i = 0; i < CYCLES; i++) { validate.accept(gen1.generate(rand)); } } + + public static interface ThrowingConsumer<T> + { + void accept(T t) throws Throwable; + } } diff --git a/harry-core/test/harry/model/OpSelectorsTest.java b/harry-core/test/harry/model/OpSelectorsTest.java index b291cbc..266aeb9 100644 --- a/harry-core/test/harry/model/OpSelectorsTest.java +++ b/harry-core/test/harry/model/OpSelectorsTest.java @@ -25,7 +25,6 @@ import java.util.EnumMap; import java.util.HashMap; import java.util.HashSet; import java.util.Map; -import java.util.Random; import java.util.Set; import java.util.TreeSet; import java.util.function.BiConsumer; @@ -39,7 +38,6 @@ import harry.core.Run; import harry.ddl.ColumnSpec; import harry.ddl.SchemaGenerators; import harry.ddl.SchemaSpec; -import harry.generators.RngUtils; import harry.generators.Surjections; import harry.generators.distribution.Distribution; import harry.model.clock.OffsetClock; @@ -186,10 +184,11 @@ public class OpSelectorsTest OpSelectors.PdSelector pdSelector = new OpSelectors.DefaultPdSelector(rng, 10, 10); OpSelectors.DescriptorSelector ckSelector = new OpSelectors.DefaultDescriptorSelector(rng, new OpSelectors.ColumnSelectorBuilder().forAll(schema, Surjections.pick(BitSet.allUnset(0))).build(), - OpSelectors.OperationSelector.weighted(Surjections.weights(10, 10, 80), + OpSelectors.OperationSelector.weighted(Surjections.weights(10, 10, 40, 40), OpSelectors.OperationKind.DELETE_ROW, OpSelectors.OperationKind.DELETE_COLUMN, - OpSelectors.OperationKind.WRITE), + OpSelectors.OperationKind.INSERT, + OpSelectors.OperationKind.UPDATE), new Distribution.ConstantDistribution(2), new Distribution.ConstantDistribution(5), 10); @@ -217,7 +216,13 @@ public class OpSelectorsTest PartitionVisitor partitionVisitor = new MutatingPartitionVisitor(run, (r) -> new Operation() { - public CompiledStatement write(long lts, long pd, long cd, long m) + public CompiledStatement insert(long lts, long pd, long cd, long m) + { + consumer.accept(pd, cd); + return compiledStatement; + } + + public CompiledStatement update(long lts, long pd, long cd, long opId) { consumer.accept(pd, cd); return compiledStatement; @@ -247,7 +252,13 @@ public class OpSelectorsTest return compiledStatement; } - public CompiledStatement writeWithStatics(long 
lts, long pd, long cd, long opId) + public CompiledStatement insertWithStatics(long lts, long pd, long cd, long opId) + { + consumer.accept(pd, cd); + return compiledStatement; + } + + public CompiledStatement updateWithStatics(long lts, long pd, long cd, long opId) { consumer.accept(pd, cd); return compiledStatement; @@ -290,10 +301,11 @@ public class OpSelectorsTest OpSelectors.DescriptorSelector ckSelector = new OpSelectors.HierarchicalDescriptorSelector(rng, new int[] {10, 20}, OpSelectors.columnSelectorBuilder().forAll(schema, Surjections.pick(BitSet.allUnset(0))).build(), - OpSelectors.OperationSelector.weighted(Surjections.weights(10, 10, 80), + OpSelectors.OperationSelector.weighted(Surjections.weights(10, 10, 40, 40), OpSelectors.OperationKind.DELETE_ROW, OpSelectors.OperationKind.DELETE_COLUMN, - OpSelectors.OperationKind.WRITE), + OpSelectors.OperationKind.INSERT, + OpSelectors.OperationKind.UPDATE), new Distribution.ConstantDistribution(2), new Distribution.ConstantDistribution(5), 100); @@ -323,8 +335,10 @@ public class OpSelectorsTest config.put(OpSelectors.OperationKind.DELETE_COLUMN, 1); config.put(OpSelectors.OperationKind.DELETE_PARTITION, 1); config.put(OpSelectors.OperationKind.DELETE_COLUMN_WITH_STATICS, 1); - config.put(OpSelectors.OperationKind.WRITE_WITH_STATICS, 1000); - config.put(OpSelectors.OperationKind.WRITE, 1000); + config.put(OpSelectors.OperationKind.UPDATE, 500); + config.put(OpSelectors.OperationKind.INSERT, 500); + config.put(OpSelectors.OperationKind.UPDATE_WITH_STATICS, 500); + config.put(OpSelectors.OperationKind.INSERT_WITH_STATICS, 500); int[] weights = new int[config.size()]; for (int i = 0; i < config.values().size(); i++) --------------------------------------------------------------------- To unsubscribe, e-mail: [email protected] For additional commands, e-mail: [email protected]
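For readers following the Reconciler change in this patch: the new row-removal rule introduced alongside hasPrimaryKeyLivenessInfo can be sketched in isolation as below. This is a minimal, self-contained illustration that assumes simplified stand-ins for Harry's RowState, NIL_DESCR and hasPrimaryKeyLivenessInfo; it is not the actual Reconciler code, only a sketch of the rule that a row survives having all of its regular columns deleted iff it still has primary key liveness info (i.e. it was created by an INSERT rather than only by UPDATEs).

// Sketch (hypothetical names, not the actual Harry classes) of the row-removal rule
// added to Reconciler.deleteColumns: once every regular column descriptor is nil,
// the row is dropped unless it has primary key liveness info.
public class RowRemovalSketch
{
    static final long NIL_DESCR = Long.MIN_VALUE;

    static class RowState
    {
        boolean hasPrimaryKeyLivenessInfo; // set by INSERT, left false by UPDATE
        long[] vds;                        // value descriptors, one per regular column

        RowState(boolean liveness, long... vds)
        {
            this.hasPrimaryKeyLivenessInfo = liveness;
            this.vds = vds;
        }
    }

    // Returns true if the row should disappear once its columns have been deleted.
    static boolean shouldRemoveRow(RowState state)
    {
        boolean allNil = true;
        for (long vd : state.vds)
        {
            if (vd != NIL_DESCR)
            {
                allNil = false;
                break;
            }
        }
        return allNil && !state.hasPrimaryKeyLivenessInfo;
    }

    public static void main(String[] args)
    {
        // Row written only with UPDATE: once all columns are nil, the row is gone.
        System.out.println(shouldRemoveRow(new RowState(false, NIL_DESCR, NIL_DESCR))); // true
        // Row created by INSERT: primary key liveness keeps it visible even with all columns nil.
        System.out.println(shouldRemoveRow(new RowState(true, NIL_DESCR, NIL_DESCR)));  // false
    }
}

Run directly, the two println calls print true and false respectively, reflecting the UPDATE-only versus INSERT-created distinction that the patch starts to track through hasPrimaryKeyLivenessInfo.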
