Merge branch 'cassandra-1.2' into cassandra-2.0
Conflicts:
CHANGES.txt
src/java/org/apache/cassandra/db/index/composites/CompositesSearcher.java
Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo
Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/096f2bf9
Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/096f2bf9
Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/096f2bf9
Branch: refs/heads/trunk
Commit: 096f2bf92049ad3d955eb1189526800c8292dd94
Parents: 52678a2 caef32e
Author: Aleksey Yeschenko <[email protected]>
Authored: Tue Sep 10 17:58:19 2013 +0300
Committer: Aleksey Yeschenko <[email protected]>
Committed: Tue Sep 10 17:58:19 2013 +0300
----------------------------------------------------------------------
CHANGES.txt | 2 ++
.../apache/cassandra/db/ColumnFamilyStore.java | 2 +-
.../db/index/composites/CompositesSearcher.java | 26 +++++++++-----------
3 files changed, 14 insertions(+), 16 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/cassandra/blob/096f2bf9/CHANGES.txt
----------------------------------------------------------------------
diff --cc CHANGES.txt
index abbb4f9,2328bf7..22fa74b
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@@ -23,54 -7,15 +23,56 @@@ Merged from 1.2
* Allow disabling SlabAllocator (CASSANDRA-5935)
* Make user-defined compaction JMX blocking (CASSANDRA-4952)
* Fix streaming does not transfer wrapped range (CASSANDRA-5948)
+ * Fix loading index summary containing empty key (CASSANDRA-5965)
+ * Correctly handle limits in CompositesSearcher (CASSANDRA-5975)
-1.2.9
+2.0.0
+ * Fix thrift validation when inserting into CQL3 tables (CASSANDRA-5138)
+ * Fix periodic memtable flushing behavior with clean memtables (CASSANDRA-5931)
+ * Fix dateOf() function for pre-2.0 timestamp columns (CASSANDRA-5928)
+ * Fix SSTable unintentionally loads BF when opened for batch (CASSANDRA-5938)
+ * Add stream session progress to JMX (CASSANDRA-4757)
+ * Fix NPE during CAS operation (CASSANDRA-5925)
+Merged from 1.2:
* Fix getBloomFilterDiskSpaceUsed for AlwaysPresentFilter (CASSANDRA-5900)
- * migrate 1.1 schema_columnfamilies.key_alias column to key_aliases
- (CASSANDRA-5800)
- * add --migrate option to sstableupgrade and sstablescrub (CASSANDRA-5831)
+ * Don't announce schema version until we've loaded the changes locally
+ (CASSANDRA-5904)
+ * Fix to support off heap bloom filters size greater than 2 GB (CASSANDRA-5903)
+ * Properly handle parsing huge map and set literals (CASSANDRA-5893)
+
+
+2.0.0-rc2
+ * enable vnodes by default (CASSANDRA-5869)
+ * fix CAS contention timeout (CASSANDRA-5830)
+ * fix HsHa to respect max frame size (CASSANDRA-4573)
+ * Fix (some) 2i on composite components omissions (CASSANDRA-5851)
+ * cqlsh: add DESCRIBE FULL SCHEMA variant (CASSANDRA-5880)
+Merged from 1.2:
+ * Correctly validate sparse composite cells in scrub (CASSANDRA-5855)
+ * Add KeyCacheHitRate metric to CF metrics (CASSANDRA-5868)
+ * cqlsh: add support for multiline comments (CASSANDRA-5798)
+ * Handle CQL3 SELECT duplicate IN restrictions on clustering columns
+ (CASSANDRA-5856)
+
+
+2.0.0-rc1
+ * improve DecimalSerializer performance (CASSANDRA-5837)
+ * fix potential spurious wakeup in AsyncOneResponse (CASSANDRA-5690)
+ * fix schema-related trigger issues (CASSANDRA-5774)
+ * Better validation when accessing CQL3 table from thrift (CASSANDRA-5138)
+ * Fix assertion error during repair (CASSANDRA-5801)
+ * Fix range tombstone bug (CASSANDRA-5805)
+ * DC-local CAS (CASSANDRA-5797)
+ * Add a native_protocol_version column to the system.local table (CASSANDRA-5819)
+ * Use index_interval from cassandra.yaml when upgraded (CASSANDRA-5822)
+ * Fix buffer underflow on socket close (CASSANDRA-5792)
+Merged from 1.2:
+ * Fix reading DeletionTime from 1.1-format sstables (CASSANDRA-5814)
+ * cqlsh: add collections support to COPY (CASSANDRA-5698)
+ * retry important messages for any IOException (CASSANDRA-5804)
+ * Allow empty IN relations in SELECT/UPDATE/DELETE statements (CASSANDRA-5626)
+ * cqlsh: fix crashing on Windows due to libedit detection (CASSANDRA-5812)
* fix bulk-loading compressed sstables (CASSANDRA-5820)
* (Hadoop) fix quoting in CqlPagingRecordReader and CqlRecordWriter
(CASSANDRA-5824)
http://git-wip-us.apache.org/repos/asf/cassandra/blob/096f2bf9/src/java/org/apache/cassandra/db/ColumnFamilyStore.java
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/cassandra/blob/096f2bf9/src/java/org/apache/cassandra/db/index/composites/CompositesSearcher.java
----------------------------------------------------------------------
diff --cc src/java/org/apache/cassandra/db/index/composites/CompositesSearcher.java
index f9b7b11,1e9d59d..011839e
--- a/src/java/org/apache/cassandra/db/index/composites/CompositesSearcher.java
+++ b/src/java/org/apache/cassandra/db/index/composites/CompositesSearcher.java
@@@ -101,12 -170,15 +101,14 @@@ public class CompositesSearcher extend
return new ColumnFamilyStore.AbstractScanIterator()
{
private ByteBuffer lastSeenPrefix = startPrefix;
- private ArrayDeque<IColumn> indexColumns;
- private final QueryPath path = new QueryPath(baseCfs.columnFamily);
+ private Deque<Column> indexColumns;
private int columnsRead = Integer.MAX_VALUE;
- private int limit = ((SliceQueryFilter)filter.initialFilter()).count;
++ private int limit = filter.currentLimit();
+ private int columnsCount = 0;
- private final int meanColumns = Math.max(index.getIndexCfs().getMeanColumns(), 1);
+ private int meanColumns = Math.max(index.getIndexCfs().getMeanColumns(), 1);
// We shouldn't fetch only 1 row as this provides buggy paging in case the first row doesn't satisfy all clauses
-- private final int rowsPerQuery = Math.max(Math.min(filter.maxRows(), filter.maxColumns() / meanColumns), 2);
++ private int rowsPerQuery = Math.max(Math.min(filter.maxRows(), filter.maxColumns() / meanColumns), 2);
public boolean needsFiltering()
{
@@@ -138,11 -206,9 +136,9 @@@
while (true)
{
- // Did we got more columns that needed to respect the user limit?
- // (but we still need to return what was fetch already)
+ // Did we get more columns that needed to respect the user limit?
+ // (but we still need to return what has been fetched already)
- if (columnsCount >= limit)
+ if (columnsCount > limit)
return makeReturn(currentKey, data);
if (indexColumns == null || indexColumns.isEmpty())
@@@ -162,16 -228,15 +158,16 @@@
lastSeenPrefix,
endPrefix,
false,
- rowsPerQuery);
+ rowsPerQuery,
+ filter.timestamp);
ColumnFamily indexRow = index.getIndexCfs().getColumnFamily(indexFilter);
- if (indexRow == null)
+ if (indexRow == null || indexRow.getColumnCount() == 0)
return makeReturn(currentKey, data);
- Collection<IColumn> sortedColumns = indexRow.getSortedColumns();
+ Collection<Column> sortedColumns = indexRow.getSortedColumns();
columnsRead = sortedColumns.size();
- indexColumns = new ArrayDeque<Column>(sortedColumns);
- indexColumns = new ArrayDeque<IColumn>(sortedColumns);
- IColumn firstColumn = sortedColumns.iterator().next();
++ indexColumns = new ArrayDeque<>(sortedColumns);
+ Column firstColumn = sortedColumns.iterator().next();
// Paging is racy, so it is possible the first column of a page is not the last seen one.
if (lastSeenPrefix != startPrefix && lastSeenPrefix.equals(firstColumn.name()))
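----------------------------------------------------------------------
Note on the rowsPerQuery heuristic touched in the hunk above: each page of the index scan is sized from the mean number of columns per index row, with a hard floor of two rows, because a one-row page breaks paging whenever that first row fails the remaining clauses. A minimal standalone sketch of the calculation, using illustrative method and parameter names rather than the actual Cassandra API:

    // Sketch only: maxRows/maxColumns stand in for the filter's limits and
    // meanColumnsPerRow for the index CFS statistic used in the diff above.
    final class IndexPageSizeSketch
    {
        static int rowsPerQuery(int maxRows, int maxColumns, int meanColumnsPerRow)
        {
            // Clamp the mean to at least 1 so the division below cannot divide by zero.
            int meanColumns = Math.max(meanColumnsPerRow, 1);
            // Fetch enough index rows to cover the column budget, but never fewer
            // than 2, since a single-row page gives buggy paging when the first
            // row doesn't satisfy all clauses.
            return Math.max(Math.min(maxRows, maxColumns / meanColumns), 2);
        }
    }

For example, with maxRows = 10000, maxColumns = 10000 and a mean of 50 columns per index row, the sketch pages 200 index rows per query.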