Merge branch 'cassandra-2.0' into cassandra-2.1
Conflicts:
CHANGES.txt
src/java/org/apache/cassandra/db/CollationController.java
Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo
Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/e88b8889
Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/e88b8889
Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/e88b8889
Branch: refs/heads/trunk
Commit: e88b88891169dd8d6b768878ea99007cec65161c
Parents: 23893ea 77bbcc1
Author: Aleksey Yeschenko <[email protected]>
Authored: Wed Jun 25 14:31:47 2014 -0700
Committer: Aleksey Yeschenko <[email protected]>
Committed: Wed Jun 25 14:31:47 2014 -0700
----------------------------------------------------------------------
CHANGES.txt | 1 +
src/java/org/apache/cassandra/db/CollationController.java | 9 ++++-----
2 files changed, 5 insertions(+), 5 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/cassandra/blob/e88b8889/CHANGES.txt
----------------------------------------------------------------------
diff --cc CHANGES.txt
index fbcb8c0,84be96d..53d388a
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@@ -1,37 -1,23 +1,38 @@@
-2.0.9
+2.1.0
+Merged from 2.0:
+ * Fix CC#collectTimeOrderedData() tombstone optimisations (CASSANDRA-7394)
- * Fix assertion error in CL.ANY timeout handling (CASSANDRA-7364)
- * Handle empty CFs in Memtable#maybeUpdateLiveRatio() (CASSANDRA-7401)
+ * Support DISTINCT for static columns and fix behaviour when DISTINCT is
+ not used (CASSANDRA-7305)
+
+
+2.1.0-rc2
+ * Fix heap size calculation for CompoundSparseCellName and
+ CompoundSparseCellName.WithCollection (CASSANDRA-7421)
+ * Allow counter mutations in UNLOGGED batches (CASSANDRA-7351)
+ * Modify reconcile logic to always pick a tombstone over a counter cell
+ (CASSANDRA-7346)
+ * Avoid incremental compaction on Windows (CASSANDRA-7365)
+ * Fix exception when querying a composite-keyed table with a collection index
+ (CASSANDRA-7372)
+ * Use node's host id in place of counter ids (CASSANDRA-7366)
* Fix native protocol CAS batches (CASSANDRA-7337)
+ * Reduce likelihood of contention on local paxos locking (CASSANDRA-7359)
+ * Upgrade to Pig 0.12.1 (CASSANDRA-6556)
+ * Make sure we clear out repair sessions from netstats (CASSANDRA-7329)
+ * Don't fail streams on failure detector downs (CASSANDRA-3569)
+ * Add optional keyspace to DROP INDEX statement (CASSANDRA-7314)
+ * Reduce run time for CQL tests (CASSANDRA-7327)
+ * Fix heap size calculation on Windows (CASSANDRA-7352, 7353)
+ * RefCount native frames from netty (CASSANDRA-7245)
+ * Use tarball dir instead of /var for default paths (CASSANDRA-7136)
+ * Remove rows_per_partition_to_cache keyword (CASSANDRA-7193)
+ * Fix schema change response in native protocol v3 (CASSANDRA-7413)
+Merged from 2.0:
+ * Fix assertion error in CL.ANY timeout handling (CASSANDRA-7364)
* Add per-CF range read request latency metrics (CASSANDRA-7338)
* Fix NPE in StreamTransferTask.createMessageForRetry() (CASSANDRA-7323)
- * Add conditional CREATE/DROP USER support (CASSANDRA-7264)
- * Swap local and global default read repair chances (CASSANDRA-7320)
- * Add missing iso8601 patterns for date strings (CASSANDRA-6973)
- * Support selecting multiple rows in a partition using IN (CASSANDRA-6875)
- * cqlsh: always emphasize the partition key in DESC output (CASSANDRA-7274)
- * Copy compaction options to make sure they are reloaded (CASSANDRA-7290)
- * Add option to do more aggressive tombstone compactions (CASSANDRA-6563)
- * Don't try to compact already-compacting files in HHOM (CASSANDRA-7288)
- * Add authentication support to shuffle (CASSANDRA-6484)
- * Cqlsh counts non-empty lines for "Blank lines" warning (CASSANDRA-7325)
* Make StreamSession#closeSession() idempotent (CASSANDRA-7262)
* Fix infinite loop on exception while streaming (CASSANDRA-7330)
- * Reference sstables before populating key cache (CASSANDRA-7234)
* Account for range tombstones in min/max column names (CASSANDRA-7235)
* Improve sub range repair validation (CASSANDRA-7317)
* Accept subtypes for function results, type casts (CASSANDRA-6766)
http://git-wip-us.apache.org/repos/asf/cassandra/blob/e88b8889/src/java/org/apache/cassandra/db/CollationController.java
----------------------------------------------------------------------
diff --cc src/java/org/apache/cassandra/db/CollationController.java
index 1030ccf,4a08a26..061537b
--- a/src/java/org/apache/cassandra/db/CollationController.java
+++ b/src/java/org/apache/cassandra/db/CollationController.java
@@@ -78,22 -77,21 +78,24 @@@ public class CollationController
try
{
Tracing.trace("Merging memtable contents");
+ long mostRecentRowTombstone = Long.MIN_VALUE;
for (Memtable memtable : view.memtables)
{
- OnDiskAtomIterator iter = filter.getMemtableColumnIterator(memtable);
- if (iter != null)
+ ColumnFamily cf = memtable.getColumnFamily(filter.key);
+ if (cf != null)
{
- iterators.add(iter);
- temp.delete(iter.getColumnFamily());
+ filter.delete(container.deletionInfo(), cf);
+ isEmpty = false;
+ Iterator<Cell> iter = filter.getIterator(cf);
while (iter.hasNext())
- temp.addAtom(iter.next());
+ {
+ Cell cell = iter.next();
+ if (copyOnHeap)
+ cell = cell.localCopy(cfs.metadata, HeapAllocator.instance);
+ container.addColumn(cell);
+ }
}
-
- container.addAll(temp, HeapAllocator.instance);
+ mostRecentRowTombstone = container.deletionInfo().getTopLevelDeletion().markedForDeleteAt;
- temp.clear();
}
// avoid changing the filter columns of the original filter
@@@ -103,10 -101,9 +105,9 @@@
QueryFilter reducedFilter = new QueryFilter(filter.key, filter.cfName, namesFilter.withUpdatedColumns(filterColumns), filter.timestamp);
/* add the SSTables on disk */
- Collections.sort(view.sstables, SSTable.maxTimestampComparator);
+ Collections.sort(view.sstables, SSTableReader.maxTimestampComparator);
// read sorted sstables
- long mostRecentRowTombstone = Long.MIN_VALUE;
for (SSTableReader sstable : view.sstables)
{
// if we've already seen a row tombstone with a timestamp greater
@@@ -123,17 -120,18 +124,15 @@@
Tracing.trace("Merging data from sstable {}", sstable.descriptor.generation);
OnDiskAtomIterator iter = reducedFilter.getSSTableColumnIterator(sstable);
iterators.add(iter);
+ isEmpty = false;
if (iter.getColumnFamily() != null)
{
-- ColumnFamily cf = iter.getColumnFamily();
- if (cf.isMarkedForDelete())
- mostRecentRowTombstone = cf.deletionInfo().getTopLevelDeletion().markedForDeleteAt;
- container.delete(cf);
- temp.delete(cf);
++ container.delete(iter.getColumnFamily());
sstablesIterated++;
while (iter.hasNext())
- temp.addAtom(iter.next());
+ container.addAtom(iter.next());
}
-
- container.addAll(temp, HeapAllocator.instance);
+ mostRecentRowTombstone = container.deletionInfo().getTopLevelDeletion().markedForDeleteAt;
- temp.clear();
}
// we need to distinguish between "there is no data at all for this row" (BF will let us rebuild that efficiently)