Merge branch 'cassandra-2.2' into cassandra-3.0
Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo
Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/f4ae3448
Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/f4ae3448
Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/f4ae3448

Branch: refs/heads/trunk
Commit: f4ae344871bbf6a57e4e4d40ec92527a172402a7
Parents: 747e5fd 073f062
Author: Marcus Eriksson <[email protected]>
Authored: Fri Nov 20 11:06:48 2015 +0100
Committer: Marcus Eriksson <[email protected]>
Committed: Fri Nov 20 11:06:48 2015 +0100

----------------------------------------------------------------------
 CHANGES.txt                                  |  1 +
 pylib/cqlshlib/cql3handling.py               |  1 +
 .../DateTieredCompactionStrategy.java        | 34 +++++++++++---------
 .../DateTieredCompactionStrategyOptions.java | 30 +++++++++++++++--
 .../DateTieredCompactionStrategyTest.java    | 34 ++++++++++++--------
 5 files changed, 70 insertions(+), 30 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cassandra/blob/f4ae3448/CHANGES.txt
----------------------------------------------------------------------
diff --cc CHANGES.txt
index 5e44f27,867226f..c3469bc
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@@ -9,10 -3,21 +9,11 @@@ Merged from 2.2
  * Fix SimpleDateType type compatibility (CASSANDRA-10027)
  * (Hadoop) fix splits calculation (CASSANDRA-10640)
  * (Hadoop) ensure that Cluster instances are always closed (CASSANDRA-10058)
- * (cqlsh) show partial trace if incomplete after max_trace_wait (CASSANDRA-7645)
- * Use most up-to-date version of schema for system tables (CASSANDRA-10652)
- * Deprecate memory_allocator in cassandra.yaml (CASSANDRA-10581,10628)
- * Expose phi values from failure detector via JMX and tweak debug
-   and trace logging (CASSANDRA-9526)
- * Fix RangeNamesQueryPager (CASSANDRA-10509)
- * Deprecate Pig support (CASSANDRA-10542)
- * Reduce contention getting instances of CompositeType (CASSANDRA-10433)
  Merged from 2.1:
+ * Limit window size in DTCS (CASSANDRA-10280)
  * sstableloader does not use MAX_HEAP_SIZE env parameter (CASSANDRA-10188)
  * (cqlsh) Improve COPY TO performance and error handling (CASSANDRA-9304)
- * Don't remove level info when running upgradesstables (CASSANDRA-10692)
  * Create compression chunk for sending file only (CASSANDRA-10680)
- * Make buffered read size configurable (CASSANDRA-10249)
  * Forbid compact clustering column type changes in ALTER TABLE (CASSANDRA-8879)
  * Reject incremental repair with subrange repair (CASSANDRA-10422)
  * Add a nodetool command to refresh size_estimates (CASSANDRA-9579)


http://git-wip-us.apache.org/repos/asf/cassandra/blob/f4ae3448/pylib/cqlshlib/cql3handling.py
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cassandra/blob/f4ae3448/src/java/org/apache/cassandra/db/compaction/DateTieredCompactionStrategy.java
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cassandra/blob/f4ae3448/test/unit/org/apache/cassandra/db/compaction/DateTieredCompactionStrategyTest.java
----------------------------------------------------------------------
diff --cc test/unit/org/apache/cassandra/db/compaction/DateTieredCompactionStrategyTest.java
index 01a6dfa,2fab014..22b4829
--- a/test/unit/org/apache/cassandra/db/compaction/DateTieredCompactionStrategyTest.java
+++ b/test/unit/org/apache/cassandra/db/compaction/DateTieredCompactionStrategyTest.java
@@@ -221,12 -231,12 +231,12 @@@ public class DateTieredCompactionStrate
          }
          cfs.forceBlockingFlush();
-         List<SSTableReader> sstrs = new ArrayList<>(cfs.getSSTables());
+         List<SSTableReader> sstrs = new ArrayList<>(cfs.getLiveSSTables());
-         List<SSTableReader> newBucket = newestBucket(Collections.singletonList(sstrs.subList(0, 2)), 4, 32, 9, 10, new SizeTieredCompactionStrategyOptions());
+         List<SSTableReader> newBucket = newestBucket(Collections.singletonList(sstrs.subList(0, 2)), 4, 32, 9, 10, Long.MAX_VALUE, new SizeTieredCompactionStrategyOptions());
          assertTrue("incoming bucket should not be accepted when it has below the min threshold SSTables", newBucket.isEmpty());
-         newBucket = newestBucket(Collections.singletonList(sstrs.subList(0, 2)), 4, 32, 10, 10, new SizeTieredCompactionStrategyOptions());
+         newBucket = newestBucket(Collections.singletonList(sstrs.subList(0, 2)), 4, 32, 10, 10, Long.MAX_VALUE, new SizeTieredCompactionStrategyOptions());
          assertFalse("non-incoming bucket should be accepted when it has at least 2 SSTables", newBucket.isEmpty());
          assertEquals("an sstable with a single value should have equal min/max timestamps", sstrs.get(0).getMinTimestamp(), sstrs.get(0).getMaxTimestamp());
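
The only API change visible in the hunk above is that newestBucket(...) now takes an extra max-window-size argument, passed as Long.MAX_VALUE in these tests so that the cap is effectively disabled. This is how CASSANDRA-10280 ("Limit window size in DTCS") bounds how large a DateTieredCompactionStrategy time window may grow; the new user-facing sub-option lives in DateTieredCompactionStrategyOptions.java and cql3handling.py, but its name is not shown in this excerpt. The standalone Java sketch below illustrates the idea only; the method windowSize, the grow-by-factor model, and the 1-day cap are illustrative assumptions, not the code from this commit.

    // Hypothetical sketch: capping DTCS window growth (not the actual Cassandra code).
    public class CappedWindowSketch
    {
        // Size of the n-th window (newest = 0): grows geometrically from baseSize
        // by 'factor', but is never allowed to exceed maxWindowSize.
        static long windowSize(long baseSize, int factor, int n, long maxWindowSize)
        {
            long size = baseSize;
            for (int i = 0; i < n; i++)
            {
                if (size > maxWindowSize / factor)   // next step would exceed the cap (or overflow)
                    return maxWindowSize;
                size *= factor;
            }
            return Math.min(size, maxWindowSize);
        }

        public static void main(String[] args)
        {
            long base = 3600L;   // 1 hour base window (illustrative)
            long cap = 86400L;   // hypothetical 1 day cap
            for (int n = 0; n < 6; n++)
                System.out.printf("window %d: capped=%d s, uncapped=%d s%n",
                                  n,
                                  windowSize(base, 4, n, cap),
                                  windowSize(base, 4, n, Long.MAX_VALUE));
            // capped:   3600, 14400, 57600, 86400, 86400, 86400
            // uncapped: 3600, 14400, 57600, 230400, 921600, 3686400
        }
    }

Passing Long.MAX_VALUE, as the updated tests do, reproduces the previous unbounded behaviour, which is why the existing bucket-selection assertions hold unchanged.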
