Author: jbellis
Date: Mon Oct 24 20:20:23 2011
New Revision: 1188353
URL: http://svn.apache.org/viewvc?rev=1188353&view=rev
Log:
remove incorrect optimization from slice read path
patch by jbellis; reviewed by slebresne for CASSANDRA-3390
Modified:
cassandra/branches/cassandra-0.8/CHANGES.txt
cassandra/branches/cassandra-0.8/src/java/org/apache/cassandra/db/ColumnFamilyStore.java
Modified: cassandra/branches/cassandra-0.8/CHANGES.txt
URL: http://svn.apache.org/viewvc/cassandra/branches/cassandra-0.8/CHANGES.txt?rev=1188353&r1=1188352&r2=1188353&view=diff
==============================================================================
--- cassandra/branches/cassandra-0.8/CHANGES.txt (original)
+++ cassandra/branches/cassandra-0.8/CHANGES.txt Mon Oct 24 20:20:23 2011
@@ -29,7 +29,9 @@
* fix assertionError during repair with ordered partitioners (CASSANDRA-3369)
* correctly serialize key_validation_class for avro (CASSANDRA-3391)
* don't expire counter tombstone after streaming (CASSANDRA-3394)
- * prevent nodes that failed to join from hanging around forever (CASSANDRA-3351)
+ * prevent nodes that failed to join from hanging around forever
+ (CASSANDRA-3351)
+ * remove incorrect optimization from slice read path (CASSANDRA-3390)
0.8.7
Modified:
cassandra/branches/cassandra-0.8/src/java/org/apache/cassandra/db/ColumnFamilyStore.java
URL: http://svn.apache.org/viewvc/cassandra/branches/cassandra-0.8/src/java/org/apache/cassandra/db/ColumnFamilyStore.java?rev=1188353&r1=1188352&r2=1188353&view=diff
==============================================================================
--- cassandra/branches/cassandra-0.8/src/java/org/apache/cassandra/db/ColumnFamilyStore.java (original)
+++ cassandra/branches/cassandra-0.8/src/java/org/apache/cassandra/db/ColumnFamilyStore.java Mon Oct 24 20:20:23 2011
@@ -1327,44 +1327,8 @@ public class ColumnFamilyStore implement
* tombstones that are no longer relevant. */
ColumnFamily filterColumnFamily(ColumnFamily cached, QueryFilter filter, int gcBefore)
{
- // special case slicing the entire row:
- // we can skip the filter step entirely, and we can help out removeDeleted by re-caching the result
- // if any tombstones have aged out since last time. (This means that the row cache will treat gcBefore as
- // max(gcBefore, all previous gcBefore), which is fine for correctness.)
- //
- // But, if the filter is asking for less columns than we have cached, we fall back to the slow path
- // since we have to copy out a subset.
- if (filter.filter instanceof SliceQueryFilter)
- {
- SliceQueryFilter sliceFilter = (SliceQueryFilter) filter.filter;
- if (sliceFilter.start.remaining() == 0 && sliceFilter.finish.remaining() == 0)
- {
- if (cached.isSuper() && filter.path.superColumnName != null)
- {
- // subcolumns from named supercolumn
- IColumn sc = cached.getColumn(filter.path.superColumnName);
- if (sc == null || sliceFilter.count >= sc.getSubColumns().size())
- {
- ColumnFamily cf = cached.cloneMeShallow();
- if (sc != null)
- cf.addColumn(sc);
- return removeDeleted(cf, gcBefore);
- }
- }
- else
- {
- // top-level columns
- if (sliceFilter.count >= cached.getColumnCount())
- {
- removeDeletedColumnsOnly(cached, gcBefore);
- return removeDeletedCF(cached, gcBefore);
- }
- }
- }
- }
-
+ ColumnFamily cf = cached.cloneMeShallow();
IColumnIterator ci = filter.getMemtableColumnIterator(cached, null, getComparator());
- ColumnFamily cf = ci.getColumnFamily().cloneMeShallow();
filter.collectCollatedColumns(cf, ci, gcBefore);
// TODO this is necessary because when we collate supercolumns together, we don't check
// their subcolumns for relevance, so we need to do a second prune post facto here.
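
For reference, a minimal sketch of how the method reads after this patch, pieced together from the hunk above; the indentation and the code following the TODO comment are not visible in this excerpt and are assumed:

    ColumnFamily filterColumnFamily(ColumnFamily cached, QueryFilter filter, int gcBefore)
    {
        // post-patch: always run the query filter against the cached row rather than
        // special-casing whole-row slices (the removed fast path was incorrect, CASSANDRA-3390)
        ColumnFamily cf = cached.cloneMeShallow();
        IColumnIterator ci = filter.getMemtableColumnIterator(cached, null, getComparator());
        filter.collectCollatedColumns(cf, ci, gcBefore);
        // TODO this is necessary because when we collate supercolumns together, we don't check
        // their subcolumns for relevance, so we need to do a second prune post facto here.
        // ... second prune and return follow in the full source (not shown in the hunk above)
    }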