belliottsmith commented on code in PR #65:
URL: https://github.com/apache/cassandra-accord/pull/65#discussion_r1443381296


##########
accord-core/src/test/java/accord/burn/BurnTest.java:
##########
@@ -70,101 +72,133 @@
 import accord.impl.list.ListResult;
 import accord.impl.list.ListUpdate;
 import accord.local.CommandStore;
+import accord.local.Node;
 import accord.local.Node.Id;
 import accord.messages.MessageType;
+import accord.messages.Reply;
 import accord.primitives.Keys;
 import accord.primitives.Range;
 import accord.primitives.Ranges;
 import accord.primitives.Timestamp;
 import accord.primitives.Txn;
+import accord.topology.Shard;
+import accord.topology.Topology;
 import accord.utils.DefaultRandom;
 import accord.utils.RandomSource;
 import accord.utils.async.AsyncExecutor;
+import org.agrona.collections.Int2ObjectHashMap;
+import org.agrona.collections.IntHashSet;
 
-import static accord.impl.IntHashKey.forHash;
+import static accord.impl.PrefixedIntHashKey.forHash;
+import static accord.impl.PrefixedIntHashKey.range;
+import static accord.impl.PrefixedIntHashKey.ranges;
 import static accord.utils.Utils.toArray;
 
 public class BurnTest
 {
     private static final Logger logger = LoggerFactory.getLogger(BurnTest.class);
 
-    static List<Packet> generate(RandomSource random, MessageListener listener, Function<? super CommandStore, AsyncExecutor> executor, List<Id> clients, List<Id> nodes, int keyCount, int operations)
-    {
-        List<Key> keys = new ArrayList<>();
-        for (int i = 0 ; i < keyCount ; ++i)
-            keys.add(IntHashKey.key(i));
+    private static final int HASH_RANGE_START = 0;
+    private static final int HASH_RANGE_END = 1 << 16;
 
+    static List<Packet> generate(RandomSource random, MessageListener listener, Function<? super CommandStore, AsyncExecutor> executor, List<Id> clients, List<Id> nodes, int[] keys, int operations)
+    {
         List<Packet> packets = new ArrayList<>();
-        int[] next = new int[keyCount];
+        Int2ObjectHashMap<int[]> prefixKeyUpdates = new Int2ObjectHashMap<>();
         double readInCommandStore = random.nextDouble();
 
         for (int count = 0 ; count < operations ; ++count)
         {
+            int finalCount = count;
             Id client = clients.get(random.nextInt(clients.size()));
             Id node = nodes.get(random.nextInt(nodes.size()));
 
             boolean isRangeQuery = random.nextBoolean();
+            String description;
+            Function<Node, Txn> gen;
             if (isRangeQuery)
             {
-                int rangeCount = 1 + random.nextInt(2);
-                List<Range> requestRanges = new ArrayList<>();
-                while (--rangeCount >= 0)
-                {
-                    int j = 1 + random.nextInt(0xffff), i = Math.max(0, j - (1 + random.nextInt(0x1ffe)));
-                    requestRanges.add(IntHashKey.range(forHash(i), forHash(j)));
-                }
-                Ranges ranges = Ranges.of(requestRanges.toArray(new Range[0]));
-                ListRead read = new ListRead(random.decide(readInCommandStore) ? Function.identity() : executor, ranges, ranges);
-                ListQuery query = new ListQuery(client, count);
-                ListRequest request = new ListRequest(new Txn.InMemory(ranges, read, query, null), listener);
-                packets.add(new Packet(client, node, count, request));
+                description = "range";
+                gen = n -> {
+                    int[] prefixes = prefixes(n.topology().current());
+
+                    int rangeCount = 1 + random.nextInt(2);
+                    List<Range> requestRanges = new ArrayList<>();
+                    while (--rangeCount >= 0)
+                    {
+                        int prefix = random.pickInt(prefixes);
+                        int i = random.nextInt(HASH_RANGE_START, HASH_RANGE_END);
+                        int j = 1 + random.nextInt(i, HASH_RANGE_END);

Review Comment:
   If we're changing the distribution, we should also randomise the distribution for each run. In my experience Zipf is often less useful for this kind of test, because most values end up the same and they cluster very small. Good distribution behaviour is annoyingly hard; uniform is not perfect either, but I would ordinarily prefer it over Zipf for this kind of input, so we should at least switch between the two.



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to