Merge branch 'cassandra-2.0' into trunk

Conflicts:
	NEWS.txt
	src/java/org/apache/cassandra/db/BatchlogManager.java
Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/59c99621 Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/59c99621 Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/59c99621 Branch: refs/heads/trunk Commit: 59c996212c90d1d5ae8f8140a78623dcb5ad6c80 Parents: 25777e1 95f1b5f Author: Aleksey Yeschenko <alek...@apache.org> Authored: Sun Jan 5 03:26:20 2014 +0300 Committer: Aleksey Yeschenko <alek...@apache.org> Committed: Sun Jan 5 03:26:20 2014 +0300 ---------------------------------------------------------------------- CHANGES.txt | 1 + NEWS.txt | 9 ++++++ conf/cassandra.yaml | 4 +++ .../org/apache/cassandra/config/Config.java | 1 + .../cassandra/config/DatabaseDescriptor.java | 5 ++++ .../apache/cassandra/db/BatchlogManager.java | 29 +++++++++++++------- 6 files changed, 39 insertions(+), 10 deletions(-) ---------------------------------------------------------------------- http://git-wip-us.apache.org/repos/asf/cassandra/blob/59c99621/CHANGES.txt ---------------------------------------------------------------------- http://git-wip-us.apache.org/repos/asf/cassandra/blob/59c99621/NEWS.txt ---------------------------------------------------------------------- diff --cc NEWS.txt index b1ec355,2e40e9c..14276d1 --- a/NEWS.txt +++ b/NEWS.txt @@@ -13,23 -13,16 +13,32 @@@ restore snapshots created with the prev 'sstableloader' tool. You can upgrade the file format of your snapshots using the provided 'sstableupgrade' tool. +2.1 +=== + +Upgrading +--------- + - Rolling upgrades from anything pre-2.0 is not supported. + - For leveled compaction users, 2.0 must be atleast started before + upgrading to 2.1 due to the fact that the old JSON leveled + manifest is migrated into the sstable metadata files on startup + in 2.0 and this code is gone from 2.1. + - For size-tiered compaction users, Cassandra now defaults to ignoring + the coldest 5% of sstables. 
This can be customized with the + cold_reads_to_omit compaction option; 0.0 omits nothing (the old + behavior) and 1.0 omits everything. + - Multithreaded compaction has been removed. + + 2.0.5 + ===== + + New features + -------- + - Batchlog replay can be, and is throttled by default now. + See batchlog_replay_throttle_in_kb setting in cassandra.yaml. + + 2.0.3 ===== http://git-wip-us.apache.org/repos/asf/cassandra/blob/59c99621/conf/cassandra.yaml ---------------------------------------------------------------------- http://git-wip-us.apache.org/repos/asf/cassandra/blob/59c99621/src/java/org/apache/cassandra/config/Config.java ---------------------------------------------------------------------- http://git-wip-us.apache.org/repos/asf/cassandra/blob/59c99621/src/java/org/apache/cassandra/config/DatabaseDescriptor.java ---------------------------------------------------------------------- http://git-wip-us.apache.org/repos/asf/cassandra/blob/59c99621/src/java/org/apache/cassandra/db/BatchlogManager.java ---------------------------------------------------------------------- diff --cc src/java/org/apache/cassandra/db/BatchlogManager.java index b103b69,cfa049a..4ce7f41 --- a/src/java/org/apache/cassandra/db/BatchlogManager.java +++ b/src/java/org/apache/cassandra/db/BatchlogManager.java @@@ -204,22 -210,24 +210,24 @@@ public class BatchlogManager implement DataInputStream in = new DataInputStream(ByteBufferUtil.inputStream(data)); int size = in.readInt(); for (int i = 0; i < size; i++) - replaySerializedMutation(Mutation.serializer.deserialize(in, VERSION), writtenAt); - replaySerializedMutation(RowMutation.serializer.deserialize(in, VERSION), writtenAt, rateLimiter); ++ replaySerializedMutation(Mutation.serializer.deserialize(in, VERSION), writtenAt, rateLimiter); } /* * We try to deliver the mutations to the replicas ourselves if they are alive and only resort to writing hints * when a replica is down or a write request times out. 
*/ - private void replaySerializedMutation(Mutation mutation, long writtenAt) - private void replaySerializedMutation(RowMutation mutation, long writtenAt, RateLimiter rateLimiter) ++ private void replaySerializedMutation(Mutation mutation, long writtenAt, RateLimiter rateLimiter) { int ttl = calculateHintTTL(mutation, writtenAt); if (ttl <= 0) return; // the mutation isn't safe to replay. - Set<InetAddress> liveEndpoints = new HashSet<InetAddress>(); + Set<InetAddress> liveEndpoints = new HashSet<>(); String ks = mutation.getKeyspaceName(); Token<?> tk = StorageService.getPartitioner().getToken(mutation.key()); - int mutationSize = (int) RowMutation.serializer.serializedSize(mutation, VERSION); ++ int mutationSize = (int) Mutation.serializer.serializedSize(mutation, VERSION); + for (InetAddress endpoint : Iterables.concat(StorageService.instance.getNaturalEndpoints(ks, tk), StorageService.instance.getTokenMetadata().pendingEndpointsFor(tk, ks))) { @@@ -235,10 -244,10 +244,10 @@@ attemptDirectDelivery(mutation, writtenAt, liveEndpoints); } - private void attemptDirectDelivery(RowMutation mutation, long writtenAt, Set<InetAddress> endpoints) + private void attemptDirectDelivery(Mutation mutation, long writtenAt, Set<InetAddress> endpoints) { List<WriteResponseHandler> handlers = Lists.newArrayList(); - final CopyOnWriteArraySet<InetAddress> undelivered = new CopyOnWriteArraySet<InetAddress>(endpoints); + final CopyOnWriteArraySet<InetAddress> undelivered = new CopyOnWriteArraySet<>(endpoints); for (final InetAddress ep : endpoints) { Runnable callback = new Runnable()