This is an automated email from the ASF dual-hosted git repository.

konstantinov pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/cassandra.git


The following commit(s) were added to refs/heads/trunk by this push:
     new a0f97a91f2 Reduce performance impact of TableMetadataRef.get and 
KeyspaceMetadataRef.get
a0f97a91f2 is described below

commit a0f97a91f22ce74abf707b6cf59b38927202ae27
Author: Dmitry Konstantinov <[email protected]>
AuthorDate: Sun Jul 6 14:27:43 2025 +0100

    Reduce performance impact of TableMetadataRef.get and 
KeyspaceMetadataRef.get
    
    Move TableMetadataRef.get to init part of bulk operations (such as flush, 
compaction, scrub)
    Cache TableMetadata value within TableMetadataRef to reduce overheads for 
write operations, same for KeyspaceMetadata
    
    Patch by Dmitry Konstantinov; reviewed by Marcus Eriksson for 
CASSANDRA-20465
---
 CHANGES.txt                                        |  1 +
 src/java/org/apache/cassandra/db/Keyspace.java     | 40 ++++++++++++++++++++-
 .../db/memtable/ShardedSkipListMemtable.java       |  9 +++++
 .../cassandra/db/memtable/SkipListMemtable.java    |  9 +++++
 .../apache/cassandra/db/memtable/TrieMemtable.java |  8 +++++
 .../io/sstable/SSTableIdentityIterator.java        | 37 +++++++++++++------
 .../io/sstable/format/SSTableSimpleScanner.java    |  5 ++-
 .../format/big/BigSSTableReaderLoadingBuilder.java | 10 +++---
 .../io/sstable/format/big/BigTableScrubber.java    | 10 +++---
 .../format/bti/BtiTableReaderLoadingBuilder.java   |  6 ++--
 .../apache/cassandra/schema/SchemaProvider.java    |  5 ++-
 .../apache/cassandra/schema/TableMetadataRef.java  | 42 ++++++++++++++++++++--
 .../ConsensusMigrationMutationHelper.java          |  2 +-
 13 files changed, 157 insertions(+), 27 deletions(-)

diff --git a/CHANGES.txt b/CHANGES.txt
index e2262d4f38..77caf7cb25 100644
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@ -1,4 +1,5 @@
 5.1
+ * Reduce performance impact of TableMetadataRef.get and 
KeyspaceMetadataRef.get (CASSANDRA-20465)
  * Improve CMS initialization (CASSANDRA-21036)
  * Introducing comments and security labels for schema elements 
(CASSANDRA-20943)
  * Extend nodetool tablestats for dictionary memory usage (CASSANDRA-20940)
diff --git a/src/java/org/apache/cassandra/db/Keyspace.java 
b/src/java/org/apache/cassandra/db/Keyspace.java
index cbf6039e42..02b554fd7e 100644
--- a/src/java/org/apache/cassandra/db/Keyspace.java
+++ b/src/java/org/apache/cassandra/db/Keyspace.java
@@ -25,6 +25,7 @@ import java.util.Iterator;
 import java.util.List;
 import java.util.Objects;
 import java.util.Set;
+import java.util.UUID;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
 import java.util.concurrent.atomic.AtomicLong;
@@ -723,6 +724,20 @@ public class Keyspace
         private final String name;
         private final SchemaProvider provider;
 
+        private volatile KeyspaceMetadataCache cachedKeyspaceMetadata;
+
+        private static class KeyspaceMetadataCache
+        {
+            private final UUID lastSeenSchemaVersion;
+            private final KeyspaceMetadata keyspaceMetadata;
+
+            private KeyspaceMetadataCache(UUID lastSeenSchemaVersion, 
KeyspaceMetadata keyspaceMetadata)
+            {
+                this.lastSeenSchemaVersion = lastSeenSchemaVersion;
+                this.keyspaceMetadata = keyspaceMetadata;
+            }
+        }
+
         public KeyspaceMetadataRef(KeyspaceMetadata initial, SchemaProvider 
provider)
         {
             this.initial = initial;
@@ -734,7 +749,30 @@ public class Keyspace
         {
             if (initial != null)
                 return initial;
-            return provider.getKeyspaceMetadata(name);
+            return getWithCaching();
+        }
+
+        private KeyspaceMetadata getWithCaching()
+        {
+            UUID schemaVersion = provider.getVersion();
+            if (schemaVersion == null)
+                return provider.getKeyspaceMetadata(name);
+
+            KeyspaceMetadataCache cache = cachedKeyspaceMetadata;
+            // we assume that local keyspaces and virtual keyspaces are 
immutable, so we need to track only a distributed schema version
+            KeyspaceMetadata metadata;
+            if (cache != null && 
schemaVersion.equals(cache.lastSeenSchemaVersion) && cache.keyspaceMetadata != 
null)
+                metadata = cache.keyspaceMetadata;
+            else
+            {
+                // we always retrieve metadata after schema version and assume 
they are changed coherently
+                // we may put new metadata + old schema version to the cache 
but not vice versa
+                // if we put non-latest schema version + latest metadata then 
it will be just updated on the next get() invocation
+                metadata = provider.getKeyspaceMetadata(name);
+                if (metadata != null)
+                    cachedKeyspaceMetadata = new 
KeyspaceMetadataCache(schemaVersion, metadata);
+            }
+            return metadata;
         }
 
         public void unsetInitial()
diff --git 
a/src/java/org/apache/cassandra/db/memtable/ShardedSkipListMemtable.java 
b/src/java/org/apache/cassandra/db/memtable/ShardedSkipListMemtable.java
index 9b9a531a2b..8c0897e1d4 100644
--- a/src/java/org/apache/cassandra/db/memtable/ShardedSkipListMemtable.java
+++ b/src/java/org/apache/cassandra/db/memtable/ShardedSkipListMemtable.java
@@ -284,6 +284,7 @@ public class ShardedSkipListMemtable extends 
AbstractShardedMemtable
     {
         long keySize = 0;
         int keyCount = 0;
+        TableMetadata currentTableMetadata = metadata();
 
         for (Iterator<AtomicBTreePartition> it = getPartitionIterator(from, 
true, to,false); it.hasNext();)
         {
@@ -297,6 +298,8 @@ public class ShardedSkipListMemtable extends 
AbstractShardedMemtable
 
         return new AbstractFlushablePartitionSet<AtomicBTreePartition>()
         {
+            private final TableMetadata tableMetadata = currentTableMetadata;
+
             public Memtable memtable()
             {
                 return ShardedSkipListMemtable.this;
@@ -326,6 +329,12 @@ public class ShardedSkipListMemtable extends 
AbstractShardedMemtable
             {
                 return partitionKeySize;
             }
+
+            @Override
+            public TableMetadata metadata()
+            {
+                return tableMetadata;
+            }
         };
     }
 
diff --git a/src/java/org/apache/cassandra/db/memtable/SkipListMemtable.java 
b/src/java/org/apache/cassandra/db/memtable/SkipListMemtable.java
index 985dd310fd..9271000f05 100644
--- a/src/java/org/apache/cassandra/db/memtable/SkipListMemtable.java
+++ b/src/java/org/apache/cassandra/db/memtable/SkipListMemtable.java
@@ -257,6 +257,7 @@ public class SkipListMemtable extends 
AbstractAllocatorMemtable
         Map<PartitionPosition, AtomicBTreePartition> toFlush = 
getPartitionsSubMap(from, true, to, false);
         long keysSize = 0;
         long keyCount = 0;
+        TableMetadata currentTableMetadata = metadata();
 
         boolean trackContention = logger.isTraceEnabled();
         if (trackContention)
@@ -289,6 +290,8 @@ public class SkipListMemtable extends 
AbstractAllocatorMemtable
 
         return new AbstractFlushablePartitionSet<AtomicBTreePartition>()
         {
+            private final TableMetadata tableMetadata = currentTableMetadata;
+
             @Override
             public Memtable memtable()
             {
@@ -324,6 +327,12 @@ public class SkipListMemtable extends 
AbstractAllocatorMemtable
             {
                 return partitionKeysSize;
             }
+
+            @Override
+            public TableMetadata metadata()
+            {
+                return tableMetadata;
+            }
         };
     }
 
diff --git a/src/java/org/apache/cassandra/db/memtable/TrieMemtable.java 
b/src/java/org/apache/cassandra/db/memtable/TrieMemtable.java
index 3e183c572e..66b1e8cb3a 100644
--- a/src/java/org/apache/cassandra/db/memtable/TrieMemtable.java
+++ b/src/java/org/apache/cassandra/db/memtable/TrieMemtable.java
@@ -448,6 +448,8 @@ public class TrieMemtable extends AbstractShardedMemtable
 
         return new AbstractFlushablePartitionSet<MemtablePartition>()
         {
+            private final TableMetadata tableMetadata = 
TrieMemtable.this.metadata();
+
             public Memtable memtable()
             {
                 return TrieMemtable.this;
@@ -480,6 +482,12 @@ public class TrieMemtable extends AbstractShardedMemtable
             {
                 return partitionKeySize;
             }
+
+            @Override
+            public TableMetadata metadata()
+            {
+                return tableMetadata;
+            }
         };
     }
 
diff --git 
a/src/java/org/apache/cassandra/io/sstable/SSTableIdentityIterator.java 
b/src/java/org/apache/cassandra/io/sstable/SSTableIdentityIterator.java
index d5a1ae8bcc..c2d7d4b7f9 100644
--- a/src/java/org/apache/cassandra/io/sstable/SSTableIdentityIterator.java
+++ b/src/java/org/apache/cassandra/io/sstable/SSTableIdentityIterator.java
@@ -61,14 +61,19 @@ public class SSTableIdentityIterator implements 
Comparable<SSTableIdentityIterat
     }
 
     public static SSTableIdentityIterator create(SSTableReader sstable, 
RandomAccessReader file, DecoratedKey key)
+    {
+        return create(sstable, sstable.metadata(), file, key);
+    }
+
+    public static SSTableIdentityIterator create(SSTableReader sstable, 
TableMetadata tableMetadata, RandomAccessReader file, DecoratedKey key)
     {
         try
         {
             DeletionTime partitionLevelDeletion = 
DeletionTime.getSerializer(sstable.descriptor.version).deserialize(file);
             if (!partitionLevelDeletion.validate())
-                UnfilteredValidation.handleInvalid(sstable.metadata(), key, 
sstable, "partitionLevelDeletion="+partitionLevelDeletion.toString());
-            DeserializationHelper helper = new 
DeserializationHelper(sstable.metadata(), 
sstable.descriptor.version.correspondingMessagingVersion(), 
DeserializationHelper.Flag.LOCAL);
-            SSTableSimpleIterator iterator = 
SSTableSimpleIterator.create(sstable.metadata(), file, sstable.header, helper, 
partitionLevelDeletion);
+                UnfilteredValidation.handleInvalid(tableMetadata, key, 
sstable, "partitionLevelDeletion="+partitionLevelDeletion.toString());
+            DeserializationHelper helper = new 
DeserializationHelper(tableMetadata, 
sstable.descriptor.version.correspondingMessagingVersion(), 
DeserializationHelper.Flag.LOCAL);
+            SSTableSimpleIterator iterator = 
SSTableSimpleIterator.create(tableMetadata, file, sstable.header, helper, 
partitionLevelDeletion);
             return new SSTableIdentityIterator(sstable, key, 
partitionLevelDeletion, file.getPath(), iterator);
         }
         catch (IOException e)
@@ -84,6 +89,11 @@ public class SSTableIdentityIterator implements 
Comparable<SSTableIdentityIterat
     }
 
     public static SSTableIdentityIterator create(SSTableReader sstable, 
FileDataInput dfile, long dataPosition, DecoratedKey key, boolean tombstoneOnly)
+    {
+        return create(sstable, sstable.metadata(), dfile, dataPosition, key, 
tombstoneOnly);
+    }
+
+    public static SSTableIdentityIterator create(SSTableReader sstable, 
TableMetadata tableMetadata, FileDataInput dfile, long dataPosition, 
DecoratedKey key, boolean tombstoneOnly)
     {
         try
         {
@@ -91,12 +101,12 @@ public class SSTableIdentityIterator implements 
Comparable<SSTableIdentityIterat
             ByteBufferUtil.skipShortLength(dfile); // Skip partition key
             DeletionTime partitionLevelDeletion = 
DeletionTime.getSerializer(sstable.descriptor.version).deserialize(dfile);
             if (!partitionLevelDeletion.validate())
-                UnfilteredValidation.handleInvalid(sstable.metadata(), key, 
sstable, "partitionLevelDeletion="+partitionLevelDeletion.toString());
+                UnfilteredValidation.handleInvalid(tableMetadata, key, 
sstable, "partitionLevelDeletion="+partitionLevelDeletion.toString());
 
-            DeserializationHelper helper = new 
DeserializationHelper(sstable.metadata(), 
sstable.descriptor.version.correspondingMessagingVersion(), 
DeserializationHelper.Flag.LOCAL);
+            DeserializationHelper helper = new 
DeserializationHelper(tableMetadata, 
sstable.descriptor.version.correspondingMessagingVersion(), 
DeserializationHelper.Flag.LOCAL);
             SSTableSimpleIterator iterator = tombstoneOnly
-                    ? 
SSTableSimpleIterator.createTombstoneOnly(sstable.metadata(), dfile, 
sstable.header, helper, partitionLevelDeletion)
-                    : SSTableSimpleIterator.create(sstable.metadata(), dfile, 
sstable.header, helper, partitionLevelDeletion);
+                    ? SSTableSimpleIterator.createTombstoneOnly(tableMetadata, 
dfile, sstable.header, helper, partitionLevelDeletion)
+                    : SSTableSimpleIterator.create(tableMetadata, dfile, 
sstable.header, helper, partitionLevelDeletion);
             return new SSTableIdentityIterator(sstable, key, 
partitionLevelDeletion, dfile.getPath(), iterator);
         }
         catch (IOException e)
@@ -112,18 +122,23 @@ public class SSTableIdentityIterator implements 
Comparable<SSTableIdentityIterat
     }
 
     public static SSTableIdentityIterator create(SSTableReader sstable, 
FileDataInput dfile, boolean tombstoneOnly)
+    {
+        return create(sstable, sstable.metadata(), dfile, tombstoneOnly);
+    }
+
+    public static SSTableIdentityIterator create(SSTableReader sstable, 
TableMetadata tableMetadata, FileDataInput dfile, boolean tombstoneOnly)
     {
         try
         {
             DecoratedKey key = 
sstable.decorateKey(ByteBufferUtil.readWithShortLength(dfile));
             DeletionTime partitionLevelDeletion = 
DeletionTime.getSerializer(sstable.descriptor.version).deserialize(dfile);
             if (!partitionLevelDeletion.validate())
-                UnfilteredValidation.handleInvalid(sstable.metadata(), key, 
sstable, "partitionLevelDeletion="+partitionLevelDeletion.toString());
+                UnfilteredValidation.handleInvalid(tableMetadata, key, 
sstable, "partitionLevelDeletion="+partitionLevelDeletion.toString());
 
-            DeserializationHelper helper = new 
DeserializationHelper(sstable.metadata(), 
sstable.descriptor.version.correspondingMessagingVersion(), 
DeserializationHelper.Flag.LOCAL);
+            DeserializationHelper helper = new 
DeserializationHelper(tableMetadata, 
sstable.descriptor.version.correspondingMessagingVersion(), 
DeserializationHelper.Flag.LOCAL);
             SSTableSimpleIterator iterator = tombstoneOnly
-                                             ? 
SSTableSimpleIterator.createTombstoneOnly(sstable.metadata(), dfile, 
sstable.header, helper, partitionLevelDeletion)
-                                             : 
SSTableSimpleIterator.create(sstable.metadata(), dfile, sstable.header, helper, 
partitionLevelDeletion);
+                                             ? 
SSTableSimpleIterator.createTombstoneOnly(tableMetadata, dfile, sstable.header, 
helper, partitionLevelDeletion)
+                                             : 
SSTableSimpleIterator.create(tableMetadata, dfile, sstable.header, helper, 
partitionLevelDeletion);
             return new SSTableIdentityIterator(sstable, key, 
partitionLevelDeletion, dfile.getPath(), iterator);
         }
         catch (IOException e)
diff --git 
a/src/java/org/apache/cassandra/io/sstable/format/SSTableSimpleScanner.java 
b/src/java/org/apache/cassandra/io/sstable/format/SSTableSimpleScanner.java
index a649fbea4c..69145e1e53 100644
--- a/src/java/org/apache/cassandra/io/sstable/format/SSTableSimpleScanner.java
+++ b/src/java/org/apache/cassandra/io/sstable/format/SSTableSimpleScanner.java
@@ -48,6 +48,8 @@ implements ISSTableScanner
     private final RandomAccessReader dfile;
     private final SSTableReader sstable;
 
+    private final TableMetadata tableMetadata;
+
     private final Iterator<PartitionPositionBounds> rangeIterator;
 
     private long bytesScannedInPreviousRanges;
@@ -75,6 +77,7 @@ implements ISSTableScanner
 
         this.dfile = sstable.openDataReaderForScan();
         this.sstable = sstable;
+        this.tableMetadata = sstable.metadata();
         this.sizeInBytes = boundsList.stream().mapToLong(ppb -> 
ppb.upperPosition - ppb.lowerPosition).sum();
         this.compressedSizeInBytes = sstable.compression ? 
sstable.onDiskSizeForPartitionPositions(boundsList) : sizeInBytes;
         this.rangeIterator = boundsList.iterator();
@@ -190,7 +193,7 @@ implements ISSTableScanner
         if (!hasNext())
             throw new NoSuchElementException();
 
-        currentIterator = SSTableIdentityIterator.create(sstable, dfile, 
false);
+        currentIterator = SSTableIdentityIterator.create(sstable, 
tableMetadata, dfile, false);
         DecoratedKey currentKey = currentIterator.partitionKey();
         if (lastKey != null && lastKey.compareTo(currentKey) >= 0)
         {
diff --git 
a/src/java/org/apache/cassandra/io/sstable/format/big/BigSSTableReaderLoadingBuilder.java
 
b/src/java/org/apache/cassandra/io/sstable/format/big/BigSSTableReaderLoadingBuilder.java
index 3557b0d802..8f4dd50d61 100644
--- 
a/src/java/org/apache/cassandra/io/sstable/format/big/BigSSTableReaderLoadingBuilder.java
+++ 
b/src/java/org/apache/cassandra/io/sstable/format/big/BigSSTableReaderLoadingBuilder.java
@@ -46,6 +46,7 @@ import org.apache.cassandra.io.util.DiskOptimizationStrategy;
 import org.apache.cassandra.io.util.FileHandle;
 import org.apache.cassandra.io.util.RandomAccessReader;
 import org.apache.cassandra.metrics.TableMetrics;
+import org.apache.cassandra.schema.TableMetadata;
 import org.apache.cassandra.service.CacheService;
 import org.apache.cassandra.utils.ByteBufferUtil;
 import org.apache.cassandra.utils.FilterFactory;
@@ -199,6 +200,7 @@ public class BigSSTableReaderLoadingBuilder extends 
SortedTableReaderLoadingBuil
         DecoratedKey first = null, key = null;
         IFilter bf = null;
         IndexSummary indexSummary = null;
+        TableMetadata tableMetadata = tableMetadataRef.getLocal();
 
         // we read the positions in a BRAF, so we don't have to worry about an 
entry spanning a mmap boundary.
         try (KeyReader keyReader = createKeyReader(indexFile, 
serializationHeader, tableMetrics))
@@ -206,15 +208,15 @@ public class BigSSTableReaderLoadingBuilder extends 
SortedTableReaderLoadingBuil
             long estimatedRowsNumber = rebuildFilter || rebuildSummary ? 
estimateRowsFromIndex(indexFile) : 0;
 
             if (rebuildFilter)
-                bf = FilterFactory.getFilter(estimatedRowsNumber, 
tableMetadataRef.getLocal().params.bloomFilterFpChance);
+                bf = FilterFactory.getFilter(estimatedRowsNumber, 
tableMetadata.params.bloomFilterFpChance);
 
             try (IndexSummaryBuilder summaryBuilder = !rebuildSummary ? null : 
new IndexSummaryBuilder(estimatedRowsNumber,
-                                                                               
                        tableMetadataRef.getLocal().params.minIndexInterval,
+                                                                               
                        tableMetadata.params.minIndexInterval,
                                                                                
                        Downsampling.BASE_SAMPLING_LEVEL))
             {
                 while (!keyReader.isExhausted())
                 {
-                    key = 
tableMetadataRef.getLocal().partitioner.decorateKey(keyReader.key());
+                    key = 
tableMetadata.partitioner.decorateKey(keyReader.key());
                     if (rebuildSummary)
                     {
                         if (first == null)
@@ -229,7 +231,7 @@ public class BigSSTableReaderLoadingBuilder extends 
SortedTableReaderLoadingBuil
                 }
 
                 if (rebuildSummary)
-                    indexSummary = 
summaryBuilder.build(tableMetadataRef.getLocal().partitioner);
+                    indexSummary = 
summaryBuilder.build(tableMetadata.partitioner);
             }
         }
         catch (IOException | RuntimeException | Error ex)
diff --git 
a/src/java/org/apache/cassandra/io/sstable/format/big/BigTableScrubber.java 
b/src/java/org/apache/cassandra/io/sstable/format/big/BigTableScrubber.java
index dc991f491f..2c719d4d4f 100644
--- a/src/java/org/apache/cassandra/io/sstable/format/big/BigTableScrubber.java
+++ b/src/java/org/apache/cassandra/io/sstable/format/big/BigTableScrubber.java
@@ -33,6 +33,7 @@ import 
org.apache.cassandra.io.sstable.format.SortedTableScrubber;
 import org.apache.cassandra.io.sstable.format.big.BigFormat.Components;
 import org.apache.cassandra.io.util.FileUtils;
 import org.apache.cassandra.io.util.RandomAccessReader;
+import org.apache.cassandra.schema.TableMetadata;
 import org.apache.cassandra.utils.ByteBufferUtil;
 import org.apache.cassandra.utils.FBUtilities;
 import org.apache.cassandra.utils.JVMStabilityInspector;
@@ -105,6 +106,7 @@ public class BigTableScrubber extends 
SortedTableScrubber<BigTableReader> implem
 
         DecoratedKey prevKey = null;
 
+        TableMetadata tableMetadata = cfs.metadata.getLocal();
         while (!dataFile.isEOF())
         {
             if (scrubInfo.isStopRequested())
@@ -117,8 +119,8 @@ public class BigTableScrubber extends 
SortedTableScrubber<BigTableReader> implem
             try
             {
                 ByteBuffer raw = ByteBufferUtil.readWithShortLength(dataFile);
-                if (!cfs.metadata.getLocal().isIndex())
-                    cfs.metadata.getLocal().partitionKeyType.validate(raw);
+                if (!tableMetadata.isIndex())
+                    tableMetadata.partitionKeyType.validate(raw);
                 key = sstable.decorateKey(raw);
             }
             catch (Throwable th)
@@ -181,8 +183,8 @@ public class BigTableScrubber extends 
SortedTableScrubber<BigTableReader> implem
                     key = sstable.decorateKey(currentIndexKey);
                     try
                     {
-                        if (!cfs.metadata.getLocal().isIndex())
-                            
cfs.metadata.getLocal().partitionKeyType.validate(key.getKey());
+                        if (!tableMetadata.isIndex())
+                            
tableMetadata.partitionKeyType.validate(key.getKey());
                         dataFile.seek(dataStartFromIndex);
 
                         if (tryAppend(prevKey, key, writer))
diff --git 
a/src/java/org/apache/cassandra/io/sstable/format/bti/BtiTableReaderLoadingBuilder.java
 
b/src/java/org/apache/cassandra/io/sstable/format/bti/BtiTableReaderLoadingBuilder.java
index 8f47e89a9a..6ee0406230 100644
--- 
a/src/java/org/apache/cassandra/io/sstable/format/bti/BtiTableReaderLoadingBuilder.java
+++ 
b/src/java/org/apache/cassandra/io/sstable/format/bti/BtiTableReaderLoadingBuilder.java
@@ -41,6 +41,7 @@ import 
org.apache.cassandra.io.sstable.metadata.ValidationMetadata;
 import org.apache.cassandra.io.util.FileHandle;
 import org.apache.cassandra.metrics.TableMetrics;
 import org.apache.cassandra.schema.Schema;
+import org.apache.cassandra.schema.TableMetadata;
 import org.apache.cassandra.utils.FilterFactory;
 import org.apache.cassandra.utils.IFilter;
 import org.apache.cassandra.utils.Throwables;
@@ -163,11 +164,12 @@ public class BtiTableReaderLoadingBuilder extends 
SortedTableReaderLoadingBuilde
 
         try (KeyReader keyReader = createKeyReader(statsMetadata))
         {
-            bf = FilterFactory.getFilter(statsMetadata.totalRows, 
tableMetadataRef.getLocal().params.bloomFilterFpChance);
+            TableMetadata tableMetadata = tableMetadataRef.getLocal();
+            bf = FilterFactory.getFilter(statsMetadata.totalRows, 
tableMetadata.params.bloomFilterFpChance);
 
             while (!keyReader.isExhausted())
             {
-                DecoratedKey key = 
tableMetadataRef.getLocal().partitioner.decorateKey(keyReader.key());
+                DecoratedKey key = 
tableMetadata.partitioner.decorateKey(keyReader.key());
                 bf.add(key);
 
                 keyReader.advance();
diff --git a/src/java/org/apache/cassandra/schema/SchemaProvider.java 
b/src/java/org/apache/cassandra/schema/SchemaProvider.java
index 2e137433af..868d357d67 100644
--- a/src/java/org/apache/cassandra/schema/SchemaProvider.java
+++ b/src/java/org/apache/cassandra/schema/SchemaProvider.java
@@ -51,7 +51,10 @@ public interface SchemaProvider
 
     default UUID getVersion()
     {
-        return ClusterMetadata.current().schema.getVersion();
+        ClusterMetadata metadata = ClusterMetadata.currentNullable();
+        if (metadata == null)
+            return null;
+        return metadata.schema.getVersion();
     }
 
     Keyspaces localKeyspaces();
diff --git a/src/java/org/apache/cassandra/schema/TableMetadataRef.java 
b/src/java/org/apache/cassandra/schema/TableMetadataRef.java
index 92a17f27da..ff3bc33e2e 100644
--- a/src/java/org/apache/cassandra/schema/TableMetadataRef.java
+++ b/src/java/org/apache/cassandra/schema/TableMetadataRef.java
@@ -19,6 +19,7 @@
 package org.apache.cassandra.schema;
 
 import java.util.Optional;
+import java.util.UUID;
 
 import org.apache.cassandra.cql3.ColumnIdentifier;
 
@@ -33,6 +34,20 @@ public class TableMetadataRef
 
     private volatile TableMetadata localTableMetadata;
 
+    private volatile TableMetadataCache cachedTableMetadata;
+
+    private static class TableMetadataCache
+    {
+        private final UUID lastSeenSchemaVersion;
+        private final TableMetadata tableMetadata;
+
+        private TableMetadataCache(UUID lastSeenSchemaVersion, TableMetadata 
tableMetadata)
+        {
+            this.lastSeenSchemaVersion = lastSeenSchemaVersion;
+            this.tableMetadata = tableMetadata;
+        }
+    }
+
     public static TableMetadataRef forIndex(SchemaProvider schema, 
TableMetadata initial, String keyspace, String name, TableId id)
     {
         return new TableMetadataRef(schema, keyspace, name, id)
@@ -118,7 +133,7 @@ public class TableMetadataRef
 
     public TableMetadata get()
     {
-        TableMetadata metadata = schema.getTableMetadata(keyspace, name);
+        TableMetadata metadata = getWithCaching();
         if (metadata == null)
             throw new IllegalStateException(format("Can't deref metadata for 
%s.%s.", keyspace, name));
         return metadata;
@@ -126,7 +141,7 @@ public class TableMetadataRef
 
     public TableMetadata getOrDefault(TableMetadata dflt)
     {
-        TableMetadata tableMetadata = schema.getTableMetadata(keyspace, name);
+        TableMetadata tableMetadata = getWithCaching();
 
         if (tableMetadata == null)
             return dflt;
@@ -134,6 +149,29 @@ public class TableMetadataRef
         return tableMetadata;
     }
 
+    private TableMetadata getWithCaching()
+    {
+        UUID schemaVersion = schema.getVersion();
+        if (schemaVersion == null)
+            return schema.getTableMetadata(keyspace, name);
+
+        TableMetadataCache cache = cachedTableMetadata;
+        // we assume that local keyspaces and virtual keyspaces are immutable, 
so we need to track only a distributed schema version
+        TableMetadata metadata;
+        if (cache != null && schemaVersion.equals(cache.lastSeenSchemaVersion) 
&& cache.tableMetadata != null)
+            metadata = cache.tableMetadata;
+        else
+        {
+            // we always retrieve metadata after schema version and assume 
they are changed coherently
+            // we may put new metadata + old schema version to the cache but 
not vice versa
+            // if we put non-latest schema version + latest metadata then it 
will be just updated on the next get() invocation
+            metadata = schema.getTableMetadata(keyspace, name);
+            if (metadata != null)
+                cachedTableMetadata = new TableMetadataCache(schemaVersion, 
metadata);
+        }
+        return metadata;
+    }
+
     /**
      * Returns node-local table metadata
      */
diff --git 
a/src/java/org/apache/cassandra/service/consensus/migration/ConsensusMigrationMutationHelper.java
 
b/src/java/org/apache/cassandra/service/consensus/migration/ConsensusMigrationMutationHelper.java
index d8cc96a3a1..0e9d410303 100644
--- 
a/src/java/org/apache/cassandra/service/consensus/migration/ConsensusMigrationMutationHelper.java
+++ 
b/src/java/org/apache/cassandra/service/consensus/migration/ConsensusMigrationMutationHelper.java
@@ -307,9 +307,9 @@ public class ConsensusMigrationMutationHelper
         for (PartitionUpdate pu : mutation.getPartitionUpdates())
         {
             TableId tableId = pu.metadata().id;
-            ColumnFamilyStore cfs = ColumnFamilyStore.getIfExists(tableId);
             if (tokenShouldBeWrittenThroughAccord(cm, tableId, dk.getToken(), 
TransactionalMode::nonSerialWritesThroughAccord, 
TransactionalMigrationFromMode::nonSerialWritesThroughAccord))
             {
+                ColumnFamilyStore cfs = ColumnFamilyStore.getIfExists(tableId);
                 throwRetryOnDifferentSystem = true;
                 if (markedColumnFamilies == null)
                     markedColumnFamilies = new HashSet<>();


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to