Repository: cassandra
Updated Branches:
  refs/heads/cassandra-3.0 517b652f5 -> ec6048758
  refs/heads/trunk 0cb5e8032 -> eace9aadd


If CF has no clustering columns, any row cache is full partition cache (CASSANDRA-12499)

Patch by Jeff Jirsa; reviewed by Sylvain Lebresne for CASSANDRA-12499
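
For readers skimming the diff below, a minimal standalone sketch of the caching decision this patch changes (plain Java, illustrative only, not the Cassandra API; the class and parameter names are made up). It mirrors metadata().clusteringColumns().size(), params.caching.cacheAllRows() and params.caching.cacheRows() from SinglePartitionReadCommand: with no clustering columns a partition holds at most one row, so any positive rows_per_partition setting already covers the whole partition.

// Minimal standalone sketch (illustrative only, not the Cassandra API) of the
// decision introduced in SinglePartitionReadCommand below.
public final class RowCacheDecisionSketch
{
    // clusteringColumnCount, cacheAllRows and rowsToCache stand in for
    // metadata().clusteringColumns().size(), params.caching.cacheAllRows()
    // and the rows_per_partition setting checked by params.caching.cacheRows().
    static boolean cacheFullPartitions(int clusteringColumnCount, boolean cacheAllRows, int rowsToCache)
    {
        return clusteringColumnCount > 0
               ? cacheAllRows      // with clustering columns, only 'ALL' caches the full partition
               : rowsToCache > 0;  // without clustering columns, any positive value already does
    }

    public static void main(String[] args)
    {
        // No clustering columns, rows_per_partition = 100:
        // previously treated as a partial cache, now recognized as a full partition cache.
        System.out.println(cacheFullPartitions(0, false, 100)); // true
        // With clustering columns, rows_per_partition = 100: still a partial cache.
        System.out.println(cacheFullPartitions(2, false, 100)); // false
    }
}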


Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo
Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/ec604875
Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/ec604875
Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/ec604875

Branch: refs/heads/cassandra-3.0
Commit: ec604875886a5790b627f0fa22cea4fca34532dd
Parents: 517b652
Author: Jeff Jirsa <jeff.ji...@crowdstrike.com>
Authored: Tue Sep 20 17:16:33 2016 -0700
Committer: Jeff Jirsa <jeff.ji...@crowdstrike.com>
Committed: Tue Sep 20 17:21:38 2016 -0700

----------------------------------------------------------------------
 CHANGES.txt                                     |  1 +
 .../db/SinglePartitionReadCommand.java          |  6 +-
 .../unit/org/apache/cassandra/SchemaLoader.java | 21 ++++--
 .../org/apache/cassandra/db/RowCacheTest.java   | 72 ++++++++++++++++++++
 4 files changed, 93 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cassandra/blob/ec604875/CHANGES.txt
----------------------------------------------------------------------
diff --git a/CHANGES.txt b/CHANGES.txt
index 791f99c..da56e00 100644
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@ -1,6 +1,7 @@
 3.0.10
  * Extend ColumnIdentifier.internedInstances key to include the type that generated the byte buffer (CASSANDRA-12516)
  * Backport CASSANDRA-10756 (race condition in NativeTransportService shutdown) (CASSANDRA-12472)
+ * If CF has no clustering columns, any row cache is full partition cache (CASSANDRA-12499)
 Merged from 2.1:
  * Add system property to set the max number of native transport requests in queue (CASSANDRA-11363)
 

http://git-wip-us.apache.org/repos/asf/cassandra/blob/ec604875/src/java/org/apache/cassandra/db/SinglePartitionReadCommand.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/SinglePartitionReadCommand.java b/src/java/org/apache/cassandra/db/SinglePartitionReadCommand.java
index 73eb9bd..886a918 100644
--- a/src/java/org/apache/cassandra/db/SinglePartitionReadCommand.java
+++ b/src/java/org/apache/cassandra/db/SinglePartitionReadCommand.java
@@ -407,7 +407,11 @@ public class SinglePartitionReadCommand extends ReadCommand
         cfs.metric.rowCacheMiss.inc();
         Tracing.trace("Row cache miss");
 
-        boolean cacheFullPartitions = metadata().params.caching.cacheAllRows();
+        // Note that on tables with no clustering keys, any positive value of
+        // rowsToCache implies caching the full partition
+        boolean cacheFullPartitions = metadata().clusteringColumns().size() > 0 ?
+                                      metadata().params.caching.cacheAllRows() :
+                                      metadata().params.caching.cacheRows();
 
         // To be able to cache what we read, what we read must at least covers what the cache holds, that
         // is the 'rowsToCache' first rows of the partition. We could read those 'rowsToCache' first rows

http://git-wip-us.apache.org/repos/asf/cassandra/blob/ec604875/test/unit/org/apache/cassandra/SchemaLoader.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/SchemaLoader.java b/test/unit/org/apache/cassandra/SchemaLoader.java
index dd4884c..87e8e1e 100644
--- a/test/unit/org/apache/cassandra/SchemaLoader.java
+++ b/test/unit/org/apache/cassandra/SchemaLoader.java
@@ -205,6 +205,7 @@ public class SchemaLoader
                 Tables.of(
                 standardCFMD(ks_rcs, "CFWithoutCache").caching(CachingParams.CACHE_NOTHING),
                 standardCFMD(ks_rcs, "CachedCF").caching(CachingParams.CACHE_EVERYTHING),
+                standardCFMD(ks_rcs, "CachedNoClustering", 1, IntegerType.instance, IntegerType.instance, null).caching(CachingParams.CACHE_EVERYTHING),
                 standardCFMD(ks_rcs, "CachedIntCF").
                         caching(new CachingParams(true, 100)))));
 
@@ -348,18 +349,22 @@ public class SchemaLoader
 
     public static CFMetaData standardCFMD(String ksName, String cfName, int columnCount, AbstractType<?> keyType, AbstractType<?> valType, AbstractType<?> clusteringType)
     {
-        CFMetaData.Builder builder = CFMetaData.Builder.create(ksName, cfName)
-                .addPartitionKey("key", keyType)
-                .addClusteringColumn("name", clusteringType)
-                .addRegularColumn("val", valType);
+        CFMetaData.Builder builder;
+        builder = CFMetaData.Builder.create(ksName, cfName)
+                                    .addPartitionKey("key", keyType)
+                                    .addRegularColumn("val", valType);
+
+        if(clusteringType != null)
+            builder = builder.addClusteringColumn("name", clusteringType);
 
         for (int i = 0; i < columnCount; i++)
             builder.addRegularColumn("val" + i, AsciiType.instance);
 
         return builder.build()
-               .compression(getCompressionParameters());
+                      .compression(getCompressionParameters());
     }
 
+
     public static CFMetaData denseCFMD(String ksName, String cfName)
     {
         return denseCFMD(ksName, cfName, AsciiType.instance);
@@ -524,11 +529,15 @@ public class SchemaLoader
         for (int i = offset; i < offset + numberOfRows; i++)
         {
             RowUpdateBuilder builder = new RowUpdateBuilder(cfm, FBUtilities.timestampMicros(), ByteBufferUtil.bytes("key"+i));
-            builder.clustering(ByteBufferUtil.bytes("col"+ i)).add("val", ByteBufferUtil.bytes("val" + i));
+            if (cfm.clusteringColumns() != null && !cfm.clusteringColumns().isEmpty())
+                builder.clustering(ByteBufferUtil.bytes("col"+ i)).add("val", ByteBufferUtil.bytes("val" + i));
+            else
+                builder.add("val", ByteBufferUtil.bytes("val"+i));
             builder.build().apply();
         }
     }
 
+
     public static void cleanupSavedCaches()
     {
         File cachesDir = new File(DatabaseDescriptor.getSavedCachesLocation());

http://git-wip-us.apache.org/repos/asf/cassandra/blob/ec604875/test/unit/org/apache/cassandra/db/RowCacheTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/db/RowCacheTest.java b/test/unit/org/apache/cassandra/db/RowCacheTest.java
index 267e5e4..cff574a 100644
--- a/test/unit/org/apache/cassandra/db/RowCacheTest.java
+++ b/test/unit/org/apache/cassandra/db/RowCacheTest.java
@@ -34,6 +34,7 @@ import org.apache.cassandra.Util;
 import org.apache.cassandra.cache.RowCacheKey;
 import org.apache.cassandra.config.CFMetaData;
 import org.apache.cassandra.config.Schema;
+import org.apache.cassandra.db.marshal.AsciiType;
 import org.apache.cassandra.db.rows.*;
 import org.apache.cassandra.db.compaction.CompactionManager;
 import org.apache.cassandra.db.filter.ColumnFilter;
@@ -58,6 +59,7 @@ public class RowCacheTest
     private static final String KEYSPACE_CACHED = "RowCacheTest";
     private static final String CF_CACHED = "CachedCF";
     private static final String CF_CACHEDINT = "CachedIntCF";
+    private static final String CF_CACHEDNOCLUSTER = "CachedNoClustering";
 
     @BeforeClass
     public static void defineSchema() throws ConfigurationException
@@ -65,6 +67,8 @@ public class RowCacheTest
         SchemaLoader.prepareServer();
         SchemaLoader.createKeyspace(KEYSPACE_CACHED,
                                     KeyspaceParams.simple(1),
+                                    SchemaLoader.standardCFMD(KEYSPACE_CACHED, CF_CACHEDNOCLUSTER, 1, AsciiType.instance, AsciiType.instance, null)
+                                                .caching(new CachingParams(true, 100)),
                                     SchemaLoader.standardCFMD(KEYSPACE_CACHED, CF_CACHED).caching(CachingParams.CACHE_EVERYTHING),
                                     SchemaLoader.standardCFMD(KEYSPACE_CACHED, CF_CACHEDINT, 1, IntegerType.instance)
                                                 .caching(new CachingParams(true, 100)));
@@ -206,6 +210,74 @@ public class RowCacheTest
     }
 
     @Test
+    public void testRowCacheNoClustering() throws Exception
+    {
+        CompactionManager.instance.disableAutoCompaction();
+
+        Keyspace keyspace = Keyspace.open(KEYSPACE_CACHED);
+        ColumnFamilyStore cachedStore  = keyspace.getColumnFamilyStore(CF_CACHEDNOCLUSTER);
+
+        // empty the row cache
+        CacheService.instance.invalidateRowCache();
+
+        // set global row cache size to 1 MB
+        CacheService.instance.setRowCacheCapacityInMB(1);
+
+        // inserting 100 rows into column family
+        SchemaLoader.insertData(KEYSPACE_CACHED, CF_CACHEDNOCLUSTER, 0, 100);
+
+        // now reading rows one by one and checking if row cache grows
+        for (int i = 0; i < 100; i++)
+        {
+            DecoratedKey key = Util.dk("key" + i);
+
+            Util.getAll(Util.cmd(cachedStore, key).build());
+
+            assertEquals(CacheService.instance.rowCache.size(), i + 1);
+            assert(cachedStore.containsCachedParition(key)); // current key should be stored in the cache
+        }
+
+        // insert 10 more keys
+        SchemaLoader.insertData(KEYSPACE_CACHED, CF_CACHEDNOCLUSTER, 100, 10);
+
+        for (int i = 100; i < 110; i++)
+        {
+            DecoratedKey key = Util.dk("key" + i);
+
+            Util.getAll(Util.cmd(cachedStore, key).build());
+            assert cachedStore.containsCachedParition(key); // cache should be populated with the latest rows read (old ones should be popped)
+
+            // checking if cell is read correctly after cache
+            CachedPartition cp = cachedStore.getRawCachedPartition(key);
+            try (UnfilteredRowIterator ai = cp.unfilteredIterator(ColumnFilter.selection(cp.columns()), Slices.ALL, false))
+            {
+                assert ai.hasNext();
+                Row r = (Row)ai.next();
+                assertFalse(ai.hasNext());
+
+                Iterator<Cell> ci = r.cells().iterator();
+                assert(ci.hasNext());
+                Cell cell = ci.next();
+
+                assert cell.column().name.bytes.equals(ByteBufferUtil.bytes("val"));
+                assert cell.value().equals(ByteBufferUtil.bytes("val" + i));
+            }
+        }
+
+        // clear 100 rows from the cache
+        int keysLeft = 109;
+        for (int i = 109; i >= 10; i--)
+        {
+            cachedStore.invalidateCachedPartition(Util.dk("key" + i));
+            assert CacheService.instance.rowCache.size() == keysLeft;
+            keysLeft--;
+        }
+
+        CacheService.instance.setRowCacheCapacityInMB(0);
+
+    }
+
+    @Test
     public void testRowCacheLoad() throws Exception
     {
         CacheService.instance.setRowCacheCapacityInMB(1);
