add tracing statements for read path
Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo
Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/900e6217
Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/900e6217
Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/900e6217

Branch: refs/heads/trunk
Commit: 900e62177282c969c0771ae7757653f2ea60f982
Parents: af97b0f
Author: Jonathan Ellis <[email protected]>
Authored: Thu Oct 25 23:36:47 2012 -0500
Committer: Jonathan Ellis <[email protected]>
Committed: Fri Oct 26 01:06:18 2012 -0700

----------------------------------------------------------------------
 .../apache/cassandra/db/CollationController.java   |   12 ++++-
 .../org/apache/cassandra/db/ColumnFamilyStore.java |   34 +++++++-------
 .../apache/cassandra/io/sstable/SSTableReader.java |   14 ++++++
 3 files changed, 41 insertions(+), 19 deletions(-)
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/cassandra/blob/900e6217/src/java/org/apache/cassandra/db/CollationController.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/CollationController.java b/src/java/org/apache/cassandra/db/CollationController.java
index 3a7e986..7bec7b2 100644
--- a/src/java/org/apache/cassandra/db/CollationController.java
+++ b/src/java/org/apache/cassandra/db/CollationController.java
@@ -71,7 +71,7 @@ public class CollationController
      */
     private ColumnFamily collectTimeOrderedData()
     {
-        logger.debug("collectTimeOrderedData");
+        logger.trace("collectTimeOrderedData");

         // AtomicSortedColumns doesn't work for super columns (see #3821)
         ISortedColumns.Factory factory = mutableColumns
@@ -79,9 +79,11 @@ public class CollationController
                                        : TreeMapBackedSortedColumns.factory();
         ColumnFamily container = ColumnFamily.create(cfs.metadata, factory, filter.filter.isReversed());
         List<OnDiskAtomIterator> iterators = new ArrayList<OnDiskAtomIterator>();
+        logger.debug("Acquiring sstable references");
         ColumnFamilyStore.ViewFragment view = cfs.markReferenced(filter.key);
         try
         {
+            logger.debug("Merging memtable contents");
             for (Memtable memtable : view.memtables)
             {
                 OnDiskAtomIterator iter = filter.getMemtableColumnIterator(memtable);
@@ -130,6 +132,7 @@ public class CollationController

                     container.delete(cf);
                     sstablesIterated++;
+                    logger.debug("Merging data from sstable {}", sstable.descriptor.generation);
                     while (iter.hasNext())
                         container.addAtom(iter.next());
                 }
@@ -162,6 +165,7 @@ public class CollationController
                 }
             };
             ColumnFamily returnCF = container.cloneMeShallow();
+            logger.debug("Final collate");
             filter.collateOnDiskAtom(returnCF, Collections.singletonList(toCollate), gcBefore);

             // "hoist up" the requested data into a more recent sstable
@@ -169,6 +173,7 @@ public class CollationController
                 && !cfs.isCompactionDisabled()
                 && cfs.getCompactionStrategy() instanceof SizeTieredCompactionStrategy)
             {
+                logger.debug("Defragmenting requested data");
                 RowMutation rm = new RowMutation(cfs.table.name, new Row(filter.key, returnCF.cloneMe()));
                 // skipping commitlog and index updates is fine since we're just de-fragmenting existing data
                 Table.open(rm.getTable()).apply(rm, false, false);
@@ -212,17 +217,19 @@ public class CollationController
      */
     private ColumnFamily collectAllData()
     {
-        logger.debug("collectAllData");
+        logger.trace("collectAllData");

         // AtomicSortedColumns doesn't work for super columns (see #3821)
         ISortedColumns.Factory factory = mutableColumns
                                        ? cfs.metadata.cfType == ColumnFamilyType.Super ? ThreadSafeSortedColumns.factory() : AtomicSortedColumns.factory()
                                        : ArrayBackedSortedColumns.factory();
+        logger.debug("Acquiring sstable references");
         ColumnFamilyStore.ViewFragment view = cfs.markReferenced(filter.key);
         List<OnDiskAtomIterator> iterators = new ArrayList<OnDiskAtomIterator>(Iterables.size(view.memtables) + view.sstables.size());
         ColumnFamily returnCF = ColumnFamily.create(cfs.metadata, factory, filter.filter.isReversed());
         try
         {
+            logger.debug("Merging memtable contents");
             for (Memtable memtable : view.memtables)
             {
                 OnDiskAtomIterator iter = filter.getMemtableColumnIterator(memtable);
@@ -276,6 +283,7 @@ public class CollationController
             if (iterators.isEmpty())
                 return null;

+            logger.debug("Merging data from memtables and sstables");
             filter.collateOnDiskAtom(returnCF, iterators, gcBefore);

             // Caller is responsible for final removeDeletedCF. This is important for cacheRow to work correctly:

http://git-wip-us.apache.org/repos/asf/cassandra/blob/900e6217/src/java/org/apache/cassandra/db/ColumnFamilyStore.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/ColumnFamilyStore.java b/src/java/org/apache/cassandra/db/ColumnFamilyStore.java
index fae37a2..0e40fdc 100644
--- a/src/java/org/apache/cassandra/db/ColumnFamilyStore.java
+++ b/src/java/org/apache/cassandra/db/ColumnFamilyStore.java
@@ -1157,11 +1157,14 @@ public class ColumnFamilyStore implements ColumnFamilyStoreMBean
             if (cached instanceof RowCacheSentinel)
             {
                 // Some other read is trying to cache the value, just do a normal non-caching read
+                logger.debug("Row cache miss (race)");
                 return getTopLevelColumns(filter, Integer.MIN_VALUE, false);
             }
+            logger.debug("Row cache hit");
             return (ColumnFamily) cached;
         }

+        logger.debug("Row cache miss");
         RowCacheSentinel sentinel = new RowCacheSentinel();
         boolean sentinelSuccess = CacheService.instance.rowCache.putIfAbsent(key, sentinel);

@@ -1185,29 +1188,14 @@ public class ColumnFamilyStore implements ColumnFamilyStoreMBean
     ColumnFamily getColumnFamily(QueryFilter filter, int gcBefore)
     {
         assert columnFamily.equals(filter.getColumnFamilyName()) : filter.getColumnFamilyName();
-        logger.debug("Executing single-partition query");

         ColumnFamily result = null;

         long start = System.nanoTime();
         try
         {
-
-            if (!isRowCacheEnabled())
-            {
-                ColumnFamily cf = getTopLevelColumns(filter, gcBefore, false);
-
-                if (cf == null)
-                    return null;
-
-                // TODO this is necessary because when we collate supercolumns together, we don't check
-                // their subcolumns for relevance, so we need to do a second prune post facto here.
-                result = cf.isSuper() ? removeDeleted(cf, gcBefore) : removeDeletedCF(cf, gcBefore);
-
-            }
-            else
+            if (isRowCacheEnabled())
             {
-
                 UUID cfId = Schema.instance.getId(table.name, columnFamily);
                 if (cfId == null)
                 {
@@ -1224,13 +1212,24 @@ public class ColumnFamilyStore implements ColumnFamilyStoreMBean

                 result = filterColumnFamily(cached, filter, gcBefore);
             }
+            else
+            {
+                ColumnFamily cf = getTopLevelColumns(filter, gcBefore, false);
+
+                if (cf == null)
+                    return null;
+
+                // TODO this is necessary because when we collate supercolumns together, we don't check
+                // their subcolumns for relevance, so we need to do a second prune post facto here.
+                result = cf.isSuper() ? removeDeleted(cf, gcBefore) : removeDeletedCF(cf, gcBefore);
+
+            }
         }
         finally
         {
             metric.readLatency.addNano(System.nanoTime() - start);
         }

-        logger.debug("Read {} cells", result == null ? 0 : result.getColumnCount());
         return result;
     }

@@ -1353,6 +1352,7 @@ public class ColumnFamilyStore implements ColumnFamilyStoreMBean

     public ColumnFamily getTopLevelColumns(QueryFilter filter, int gcBefore, boolean forCache)
     {
+        logger.debug("Executing single-partition query");
         CollationController controller = new CollationController(this, forCache, filter, gcBefore);
         ColumnFamily columns = controller.getTopLevelColumns();
         metric.updateSSTableIterated(controller.getSstablesIterated());

http://git-wip-us.apache.org/repos/asf/cassandra/blob/900e6217/src/java/org/apache/cassandra/io/sstable/SSTableReader.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/io/sstable/SSTableReader.java b/src/java/org/apache/cassandra/io/sstable/SSTableReader.java
index 91713a1..b9b93a8 100644
--- a/src/java/org/apache/cassandra/io/sstable/SSTableReader.java
+++ b/src/java/org/apache/cassandra/io/sstable/SSTableReader.java
@@ -730,7 +730,10 @@ public class SSTableReader extends SSTable
         {
             assert key instanceof DecoratedKey; // EQ only make sense if the key is a valid row key
             if (!bf.isPresent(((DecoratedKey)key).key))
+            {
+                logger.debug("Bloom filter allows skipping sstable {}", descriptor.generation);
                 return null;
+            }
         }

         // next, the key cache (only make sense for valid row key)
@@ -742,6 +745,7 @@ public class SSTableReader extends SSTable
             if (cachedPosition != null)
             {
                 logger.trace("Cache hit for {} -> {}", cacheKey, cachedPosition);
+                logger.debug("Key cache hit for sstable {}", descriptor.generation);
                 return cachedPosition;
             }
         }
@@ -755,9 +759,14 @@ public class SSTableReader extends SSTable
             // we matched the -1th position: if the operator might match forward, we'll start at the first
             // position. We however need to return the correct index entry for that first position.
             if (op.apply(1) >= 0)
+            {
                 sampledPosition = 0;
+            }
             else
+            {
+                logger.debug("Index sample allows skipping sstable {}", descriptor.generation);
                 return null;
+            }
         }

         // scan the on-disk index, starting at the nearest sampled position.
@@ -795,7 +804,10 @@ public class SSTableReader extends SSTable
                         opSatisfied = (v == 0);
                         exactMatch = (comparison == 0);
                         if (v < 0)
+                        {
+                            logger.debug("Partition index lookup allows skipping sstable {}", descriptor.generation);
                             return null;
+                        }
                     }

                     if (opSatisfied)
@@ -822,6 +834,7 @@ public class SSTableReader extends SSTable
                         }
                         if (op == Operator.EQ && updateCacheAndStats)
                             bloomFilterTracker.addTruePositive();
+                        logger.debug("Partition index lookup complete for sstable {}", descriptor.generation);
                         return indexEntry;
                     }

@@ -841,6 +854,7 @@ public class SSTableReader extends SSTable

         if (op == Operator.EQ && updateCacheAndStats)
             bloomFilterTracker.addFalsePositive();
+        logger.debug("Partition index lookup complete (bloom filter false positive) {}", descriptor.generation);
         return null;
     }
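
Editor's note (not part of the commit): the statements added above follow the standard SLF4J parameterized-logging pattern, so the per-sstable messages cost little on the read path unless DEBUG is enabled for the owning logger. A minimal, self-contained sketch of that pattern; the class and method names below are illustrative only and do not come from this diff:

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    // Illustrative sketch only; not Cassandra code.
    public class ReadTraceExample
    {
        private static final Logger logger = LoggerFactory.getLogger(ReadTraceExample.class);

        public void onKeyCacheHit(int generation)
        {
            // The {} placeholder is substituted only when DEBUG is enabled for this logger,
            // so no string concatenation happens on the hot path otherwise.
            logger.debug("Key cache hit for sstable {}", generation);
        }

        public static void main(String[] args)
        {
            new ReadTraceExample().onKeyCacheHit(42);
        }
    }

To surface the new messages on a running node, DEBUG would need to be enabled for org.apache.cassandra.db and org.apache.cassandra.io.sstable in the server's log4j configuration; the exact file name (conf/log4j-server.properties in this era of Cassandra) is an assumption about the deployment, not something this diff changes.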
