Merge branch 'cassandra-1.2' into cassandra-2.0
Conflicts:
src/java/org/apache/cassandra/db/compaction/LazilyCompactedRow.java
test/unit/org/apache/cassandra/db/RangeTombstoneTest.java
Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo
Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/61543b4c
Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/61543b4c
Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/61543b4c
Branch: refs/heads/trunk
Commit: 61543b4c0080e1c2048ff43ae33d4cfc240accbb
Parents: 2015072 1cf9863
Author: Marcus Eriksson <[email protected]>
Authored: Tue Aug 26 15:28:28 2014 +0200
Committer: Marcus Eriksson <[email protected]>
Committed: Tue Aug 26 15:30:34 2014 +0200
----------------------------------------------------------------------
CHANGES.txt | 1 +
.../db/compaction/LazilyCompactedRow.java | 7 ++-
.../apache/cassandra/db/RangeTombstoneTest.java | 58 ++++++++++++++++++++
3 files changed, 63 insertions(+), 3 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/cassandra/blob/61543b4c/CHANGES.txt
----------------------------------------------------------------------
diff --cc CHANGES.txt
index e716cb5,badb45e..7a59744
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@@ -85,37 -11,15 +85,38 @@@ Merged from 1.2
are thrown while handling native protocol messages (CASSANDRA-7470)
* Fix row size miscalculation in LazilyCompactedRow (CASSANDRA-7543)
* Fix race in background compaction check (CASSANDRA-7745)
+ * Don't clear out range tombstones during compaction (CASSANDRA-7808)
-1.2.18
- * Support Thrift tables clustering columns on CqlPagingInputFormat
(CASSANDRA-7445)
- * Fix compilation with java 6 broke by CASSANDRA-7147
-
-
-1.2.17
+2.0.9
+ * Fix CC#collectTimeOrderedData() tombstone optimisations (CASSANDRA-7394)
+ * Fix assertion error in CL.ANY timeout handling (CASSANDRA-7364)
+ * Handle empty CFs in Memtable#maybeUpdateLiveRatio() (CASSANDRA-7401)
+ * Fix native protocol CAS batches (CASSANDRA-7337)
+ * Add per-CF range read request latency metrics (CASSANDRA-7338)
+ * Fix NPE in StreamTransferTask.createMessageForRetry() (CASSANDRA-7323)
+ * Add conditional CREATE/DROP USER support (CASSANDRA-7264)
+ * Swap local and global default read repair chances (CASSANDRA-7320)
+ * Add missing iso8601 patterns for date strings (CASSANDRA-6973)
+ * Support selecting multiple rows in a partition using IN (CASSANDRA-6875)
+ * cqlsh: always emphasize the partition key in DESC output (CASSANDRA-7274)
+ * Copy compaction options to make sure they are reloaded (CASSANDRA-7290)
+ * Add option to do more aggressive tombstone compactions (CASSANDRA-6563)
+ * Don't try to compact already-compacting files in HHOM (CASSANDRA-7288)
+ * Add authentication support to shuffle (CASSANDRA-6484)
+ * Cqlsh counts non-empty lines for "Blank lines" warning (CASSANDRA-7325)
+ * Make StreamSession#closeSession() idempotent (CASSANDRA-7262)
+ * Fix infinite loop on exception while streaming (CASSANDRA-7330)
+ * Reference sstables before populating key cache (CASSANDRA-7234)
+ * Account for range tombstones in min/max column names (CASSANDRA-7235)
+ * Improve sub range repair validation (CASSANDRA-7317)
+ * Accept subtypes for function results, type casts (CASSANDRA-6766)
+  * Support DISTINCT for static columns and fix behaviour when DISTINCT is
+    not used (CASSANDRA-7305).
+ * Refuse range queries with strict bounds on compact tables since they
+ are broken (CASSANDRA-7059)
+Merged from 1.2:
+ * Expose global ColumnFamily metrics (CASSANDRA-7273)
* cqlsh: Fix CompositeType columns in DESCRIBE TABLE output (CASSANDRA-7399)
* Expose global ColumnFamily metrics (CASSANDRA-7273)
* Handle possible integer overflow in FastByteArrayOutputStream
(CASSANDRA-7373)
http://git-wip-us.apache.org/repos/asf/cassandra/blob/61543b4c/src/java/org/apache/cassandra/db/compaction/LazilyCompactedRow.java
----------------------------------------------------------------------
diff --cc src/java/org/apache/cassandra/db/compaction/LazilyCompactedRow.java
index 1da1757,4360b0b..e3f18bd
--- a/src/java/org/apache/cassandra/db/compaction/LazilyCompactedRow.java
+++ b/src/java/org/apache/cassandra/db/compaction/LazilyCompactedRow.java
@@@ -192,14 -220,26 +192,14 @@@ public class LazilyCompactedRow extend
private class Reducer extends MergeIterator.Reducer<OnDiskAtom,
OnDiskAtom>
{
// all columns reduced together will have the same name, so there
will only be one column
- // in the container; we just want to leverage the conflict resolution
code from CF
- ColumnFamily container = emptyColumnFamily.cloneMeShallow();
+ // in the container; we just want to leverage the conflict resolution
code from CF.
+ // (Note that we add the row tombstone in getReduced.)
- final ColumnFamily container =
ArrayBackedSortedColumns.factory.create(emptyColumnFamily.metadata());
++ ColumnFamily container =
ArrayBackedSortedColumns.factory.create(emptyColumnFamily.metadata());
- // tombstone reference; will be reconciled w/ column during getReduced
+ // tombstone reference; will be reconciled w/ column during
getReduced. Note that the top-level (row) tombstone
+ // is held by LCR.deletionInfo.
RangeTombstone tombstone;
- long serializedSize = 4; // int for column count
int columns = 0;
long minTimestampSeen = Long.MAX_VALUE;
long maxTimestampSeen = Long.MIN_VALUE;
@@@ -263,27 -283,22 +263,28 @@@
}
else
{
- ColumnFamily purged =
PrecompactedRow.removeDeletedAndOldShards(key, shouldPurge, controller,
container);
+ // when we clear() the container, it removes the deletion
info, so this needs to be reset each time
+ container.delete(maxRowTombstone);
+ ColumnFamily purged = PrecompactedRow.removeDeleted(key,
shouldPurge, controller, container);
if (purged == null || !purged.iterator().hasNext())
{
- container.clear();
+ // don't call clear() because that resets the deletion
time. See CASSANDRA-7808.
- container = emptyColumnFamily.cloneMeShallow();
++ container =
ArrayBackedSortedColumns.factory.create(emptyColumnFamily.metadata());
return null;
}
- IColumn reduced = purged.iterator().next();
- container = emptyColumnFamily.cloneMeShallow();
+ Column reduced = purged.iterator().next();
- container.clear();
++ container =
ArrayBackedSortedColumns.factory.create(emptyColumnFamily.metadata());
- // PrecompactedRow.removeDeletedAndOldShards have only
checked the top-level CF deletion times,
- // not the range tombstone. For that we use the columnIndexer
tombstone tracker.
+ // PrecompactedRow.removeDeleted has only checked the
top-level CF deletion times,
+ // not the range tombstones. For that we use the
columnIndexer tombstone tracker.
if (indexBuilder.tombstoneTracker().isDeleted(reduced))
+ {
+ indexer.remove(reduced);
return null;
-
- serializedSize += reduced.serializedSizeForSSTable();
+ }
+ int localDeletionTime =
purged.deletionInfo().getTopLevelDeletion().localDeletionTime;
+ if (localDeletionTime < Integer.MAX_VALUE)
+ tombstones.update(localDeletionTime);
columns++;
minTimestampSeen = Math.min(minTimestampSeen,
reduced.minTimestamp());
maxTimestampSeen = Math.max(maxTimestampSeen,
reduced.maxTimestamp());
http://git-wip-us.apache.org/repos/asf/cassandra/blob/61543b4c/test/unit/org/apache/cassandra/db/RangeTombstoneTest.java
----------------------------------------------------------------------
diff --cc test/unit/org/apache/cassandra/db/RangeTombstoneTest.java
index 535b9e2,59be938..80982cd
--- a/test/unit/org/apache/cassandra/db/RangeTombstoneTest.java
+++ b/test/unit/org/apache/cassandra/db/RangeTombstoneTest.java
@@@ -18,23 -18,16 +18,26 @@@
*/
package org.apache.cassandra.db;
+ import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.*;
+ import java.util.concurrent.ExecutionException;
+import com.google.common.collect.ImmutableMap;
+import org.apache.cassandra.config.ColumnDefinition;
+import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.db.columniterator.OnDiskAtomIterator;
+import org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy;
+import org.apache.cassandra.db.index.*;
+import org.apache.cassandra.exceptions.ConfigurationException;
+import org.apache.cassandra.io.sstable.SSTableReader;
+import org.apache.cassandra.thrift.IndexType;
+
+import org.junit.Ignore;
import org.junit.Test;
import org.apache.cassandra.SchemaLoader;
+ import org.apache.cassandra.Util;
-import org.apache.cassandra.config.DatabaseDescriptor;
import org.apache.cassandra.db.compaction.CompactionManager;
import org.apache.cassandra.db.filter.*;
import org.apache.cassandra.utils.ByteBufferUtil;
@@@ -108,6 -102,61 +111,61 @@@ public class RangeTombstoneTest extend
}
@Test
+ public void test7808_1() throws ExecutionException, InterruptedException
+ {
+ DatabaseDescriptor.setInMemoryCompactionLimit(0);
- Table table = Table.open(KSNAME);
- ColumnFamilyStore cfs = table.getColumnFamilyStore(CFNAME);
++ Keyspace ks = Keyspace.open(KSNAME);
++ ColumnFamilyStore cfs = ks.getColumnFamilyStore(CFNAME);
+ cfs.metadata.gcGraceSeconds(2);
+
+ String key = "7808_1";
+ RowMutation rm;
+ rm = new RowMutation(KSNAME, ByteBufferUtil.bytes(key));
+ for (int i = 0; i < 40; i += 2)
+ add(rm, i, 0);
+ rm.apply();
+ cfs.forceBlockingFlush();
+ rm = new RowMutation(KSNAME, ByteBufferUtil.bytes(key));
+ ColumnFamily cf = rm.addOrGet(CFNAME);
+ cf.delete(new DeletionInfo(1, 1));
+ rm.apply();
+ cfs.forceBlockingFlush();
+ Thread.sleep(5);
+ cfs.forceMajorCompaction();
+ }
+
+ @Test
+ public void test7808_2() throws ExecutionException, InterruptedException,
IOException
+ {
+ DatabaseDescriptor.setInMemoryCompactionLimit(0);
- Table table = Table.open(KSNAME);
- ColumnFamilyStore cfs = table.getColumnFamilyStore(CFNAME);
++ Keyspace ks = Keyspace.open(KSNAME);
++ ColumnFamilyStore cfs = ks.getColumnFamilyStore(CFNAME);
+ cfs.metadata.gcGraceSeconds(2);
+
+ String key = "7808_2";
+ RowMutation rm;
+ rm = new RowMutation(KSNAME, ByteBufferUtil.bytes(key));
+ for (int i = 10; i < 20; i++)
+ add(rm, i, 0);
+ rm.apply();
+ cfs.forceBlockingFlush();
+
+ rm = new RowMutation(KSNAME, ByteBufferUtil.bytes(key));
+ ColumnFamily cf = rm.addOrGet(CFNAME);
+ cf.delete(new DeletionInfo(0,0));
+ rm.apply();
+
+ rm = new RowMutation(KSNAME, ByteBufferUtil.bytes(key));
+ add(rm, 5, 1);
+ rm.apply();
+
+ cfs.forceBlockingFlush();
+ Thread.sleep(5);
+ cfs.forceMajorCompaction();
- assertEquals(1, Util.getColumnFamily(table, Util.dk(key),
CFNAME).getColumnCount());
++ assertEquals(1, Util.getColumnFamily(ks, Util.dk(key),
CFNAME).getColumnCount());
+ }
+
+ @Test
public void overlappingRangeTest() throws Exception
{
CompactionManager.instance.disableAutoCompaction();