dcapwell commented on code in PR #3481:
URL: https://github.com/apache/cassandra/pull/3481#discussion_r1733541877


##########
src/java/org/apache/cassandra/db/virtual/AccordVirtualTables.java:
##########
@@ -246,6 +259,106 @@ public DataSet data()
         }
     }
 
+    public static class TxnBlockedByTable extends AbstractVirtualTable
+    {
+        enum Reason { Self, Txn, Key }
+        private final UserType partitionKeyType;
+
+        protected TxnBlockedByTable(String keyspace)
+        {
+            super(TableMetadata.builder(keyspace, "txn_blocked_by")
+                               .kind(TableMetadata.Kind.VIRTUAL)
+                               .addPartitionKeyColumn("txn_id", UTF8Type.instance)
+                               .addClusteringColumn("store_id", Int32Type.instance)
+                               .addClusteringColumn("depth", Int32Type.instance)
+                               .addClusteringColumn("blocked_by", UTF8Type.instance)
+                               .addClusteringColumn("reason", UTF8Type.instance)
+                               .addRegularColumn("save_status", UTF8Type.instance)
+                               .addRegularColumn("execute_at", UTF8Type.instance)
+                               .addRegularColumn("key", pkType(keyspace))
+                               .build());
+            partitionKeyType = pkType(keyspace);
+        }
+
+        private static UserType pkType(String keyspace)
+        {
+            return new UserType(keyspace, bytes("partition_key"),
+                                Arrays.asList(FieldIdentifier.forQuoted("table"), FieldIdentifier.forQuoted("token")),
+                                Arrays.asList(UTF8Type.instance, UTF8Type.instance), false);
+        }
+
+        private ByteBuffer pk(PartitionKey pk)
+        {
+            var tm = Schema.instance.getTableMetadata(pk.table());
+            return partitionKeyType.pack(UTF8Type.instance.decompose(tm.toString()),
+                                         UTF8Type.instance.decompose(pk.token().toString()));
+        }
+
+        @Override
+        public Iterable<UserType> userTypes()
+        {
+            return Arrays.asList(partitionKeyType);
+        }
+
+        @Override
+        public DataSet data(DecoratedKey partitionKey)
+        {
+            TxnId id = TxnId.parse(UTF8Type.instance.compose(partitionKey.getKey()));
+            List<StoreTxnState> shards = AccordService.instance().debug(id);
+
+            SimpleDataSet ds = new SimpleDataSet(metadata());
+            for (StoreTxnState shard : shards)
+            {
+                Set<TxnId> processed = new HashSet<>();
+                process(ds, shard, processed, id, 0, id, Reason.Self, null);
+                // everything was processed right?
+                Sets.SetView<TxnId> skipped = Sets.difference(shard.txns.keySet(), processed);
+                if (!skipped.isEmpty())
+                    throw new IllegalStateException("Skipped txns: " + skipped);
+            }
+
+            return ds;
+        }
+
+        private void process(SimpleDataSet ds, StoreTxnState shard, Set<TxnId> processed, TxnId userTxn, int depth, TxnId txnId, Reason reason, Runnable onDone)
+        {
+            if (!processed.add(txnId))
+                throw new IllegalStateException("Double processed " + txnId);
+            StoreTxnState.TxnState txn = shard.txns.get(txnId);
+            if (txn == null)
+            {
+                Invariants.checkState(reason == Reason.Self, "Txn %s unknown for reason %s", txnId, reason);
+                return;
+            }
+            ds.row(userTxn.toString(), shard.storeId, depth, reason == Reason.Self ? "" : txn.txnId.toString(), reason.name());
+            ds.column("save_status", txn.saveStatus.name());
+            if (txn.executeAt != null)
+                ds.column("execute_at", txn.executeAt.toString());
+            if (onDone != null)
+                onDone.run();
+            if (!txn.notBlocked())
+            {
+                for (TxnId blockedBy : txn.blockedBy)
+                {
+                    if (processed.contains(blockedBy)) continue; // already listed
+                    process(ds, shard, processed, userTxn, depth + 1, blockedBy, Reason.Txn, null);
+                }
+                for (PartitionKey blockedBy : txn.blockedByKey)
+                {
+                    TxnId blocking = shard.keys.get(blockedBy);
+                    if (processed.contains(blocking)) continue; // already listed

Review Comment:
   > The only possible loop
   
   This is not only due to loops. Let's say we have the following:
   
   ```
   T1
   T2 blocked by T1
   T3 blocked by T1
   T4 blocked by key K1 (nextApply=T1)
   ```
   
   Can I say that T3 will only ever depend on T2? I don't know... Can I say that some txns will depend directly on another txn while others depend on keys? I don't know... I mostly figure it's safer to keep track of what has already been processed than to fail in a random way if/when this happens.
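   
   To make that concrete, here is a small standalone Java sketch (hypothetical class name and dependency map, not code from this PR) of a diamond-shaped "blocked by" graph: if T4 were blocked by both T2 and T3, T1 is reachable twice even though nothing loops, and a `processed`-style guard is what keeps it from being listed (or recursed into) twice.
   
   ```java
   // Minimal sketch (assumed names, not the PR's code): a diamond-shaped
   // dependency graph with no cycle, where T1 is reachable from T4 through
   // both T2 and T3. The `processed` set ensures T1 is only emitted once,
   // mirroring the processed.add(...) / processed.contains(...) guards above.
   import java.util.HashSet;
   import java.util.List;
   import java.util.Map;
   import java.util.Set;
   
   public class BlockedByWalkSketch
   {
       static final Map<String, List<String>> BLOCKED_BY = Map.of(
           "T4", List.of("T2", "T3"), // hypothetical: T4 blocked by both T2 and T3
           "T2", List.of("T1"),
           "T3", List.of("T1"),
           "T1", List.of());          // T1 is not blocked by anything
   
       static void walk(String txnId, int depth, Set<String> processed)
       {
           if (!processed.add(txnId))
               return; // already listed; without this guard T1 would be emitted twice
           System.out.println("  ".repeat(depth) + txnId);
           for (String blockedBy : BLOCKED_BY.getOrDefault(txnId, List.of()))
               if (!processed.contains(blockedBy)) // same shape as the guard in process(...)
                   walk(blockedBy, depth + 1, processed);
       }
   
       public static void main(String[] args)
       {
           walk("T4", 0, new HashSet<>());
           // Prints T4, T2, T1, T3 -- T1 shows up once even though two txns depend on it.
       }
   }
   ```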



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]

