git commit: Backport CASSANDRA-6196 to 1.2
Updated Branches: refs/heads/cassandra-1.2 a9b403e64 - 12413ad1f Backport CASSANDRA-6196 to 1.2 patch by Mikhail Stepura; reviewed by Aleksey Yeschenko for CASSANDRA-6196 Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/12413ad1 Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/12413ad1 Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/12413ad1 Branch: refs/heads/cassandra-1.2 Commit: 12413ad1f848aacb3314d279fb4db7e2b293d066 Parents: a9b403e Author: Aleksey Yeschenko alek...@apache.org Authored: Tue Oct 22 14:21:38 2013 +0800 Committer: Aleksey Yeschenko alek...@apache.org Committed: Tue Oct 22 14:22:13 2013 +0800 -- CHANGES.txt| 1 + bin/cqlsh | 2 +- pylib/cqlshlib/cql3handling.py | 88 + 3 files changed, 3 insertions(+), 88 deletions(-) -- http://git-wip-us.apache.org/repos/asf/cassandra/blob/12413ad1/CHANGES.txt -- diff --git a/CHANGES.txt b/CHANGES.txt index 117a200..4d07d78 100644 --- a/CHANGES.txt +++ b/CHANGES.txt @@ -1,6 +1,7 @@ 1.2.12 * (Hadoop) Require CFRR batchSize to be at least 2 (CASSANDRA-6114) * Fix altering column types (CASSANDRA-6185) + * cqlsh: fix CREATE/ALTER WITH completion (CASSANDRA-6196) 1.2.11 http://git-wip-us.apache.org/repos/asf/cassandra/blob/12413ad1/bin/cqlsh -- diff --git a/bin/cqlsh b/bin/cqlsh index 64e9b6f..5ccab3d 100755 --- a/bin/cqlsh +++ b/bin/cqlsh @@ -32,7 +32,7 @@ exit 1 from __future__ import with_statement description = CQL Shell for Apache Cassandra -version = 3.1.7 +version = 3.1.8 from StringIO import StringIO from itertools import groupby http://git-wip-us.apache.org/repos/asf/cassandra/blob/12413ad1/pylib/cqlshlib/cql3handling.py -- diff --git a/pylib/cqlshlib/cql3handling.py b/pylib/cqlshlib/cql3handling.py index c5b449f..b9265fd 100644 --- a/pylib/cqlshlib/cql3handling.py +++ b/pylib/cqlshlib/cql3handling.py @@ -1017,92 +1017,6 @@ def create_ks_wat_completer(ctxt, cass): return ['KEYSPACE'] return ['KEYSPACE', 
'SCHEMA'] -@completer_for('oldPropSpec', 'optname') -def create_ks_opt_completer(ctxt, cass): -exist_opts = ctxt.get_binding('optname', ()) -try: -stratopt = exist_opts.index('strategy_class') -except ValueError: -return ['strategy_class ='] -vals = ctxt.get_binding('optval') -stratclass = dequote_value(vals[stratopt]) -if stratclass in CqlRuleSet.replication_factor_strategies: -return ['strategy_options:replication_factor ='] -return [Hint('strategy_option_name')] - -@completer_for('oldPropSpec', 'optval') -def create_ks_optval_completer(ctxt, cass): -exist_opts = ctxt.get_binding('optname', (None,)) -if exist_opts[-1] == 'strategy_class': -return map(escape_value, CqlRuleSet.replication_strategies) -return [Hint('option_value')] - -@completer_for('newPropSpec', 'propname') -def keyspace_properties_option_name_completer(ctxt, cass): -optsseen = ctxt.get_binding('propname', ()) -if 'replication' not in optsseen: -return ['replication'] -return [durable_writes] - -@completer_for('propertyValue', 'propsimpleval') -def property_value_completer(ctxt, cass): -optname = ctxt.get_binding('propname')[-1] -if optname == 'durable_writes': -return ['true', 'false'] -if optname == 'replication': -return [{'class': '] -return () - -@completer_for('propertyValue', 'propmapkey') -def keyspace_properties_map_key_completer(ctxt, cass): -optname = ctxt.get_binding('propname')[-1] -if optname != 'replication': -return () -keysseen = map(dequote_value, ctxt.get_binding('propmapkey', ())) -valsseen = map(dequote_value, ctxt.get_binding('propmapval', ())) -for k, v in zip(keysseen, valsseen): -if k == 'class': -repclass = v -break -else: -return ['class'] -if repclass in CqlRuleSet.replication_factor_strategies: -opts = set(('replication_factor',)) -elif repclass == 'NetworkTopologyStrategy': -return [Hint('dc_name')] -return map(escape_value, opts.difference(keysseen)) - -@completer_for('propertyValue', 'propmapval') -def keyspace_properties_map_value_completer(ctxt, cass): -optname = 
ctxt.get_binding('propname')[-1] -if optname != 'replication': -return () -currentkey = dequote_value(ctxt.get_binding('propmapkey')[-1]) -if currentkey == 'class': -return map(escape_value, CqlRuleSet.replication_strategies) -return [Hint('value')] -
[jira] [Commented] (CASSANDRA-3578) Multithreaded commitlog
[ https://issues.apache.org/jira/browse/CASSANDRA-3578?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanelfocusedCommentId=13801534#comment-13801534 ] Alex Liu commented on CASSANDRA-3578: - {quote} The above takes a different approach, we update commit log as a part of the mutation thread and no more threads to deal with serialization {quote} It slows down the mutation thread by making it wait until the commitlog write of the mutation is done. It's better to keep it async by using a separate thread. Multithreaded commitlog --- Key: CASSANDRA-3578 URL: https://issues.apache.org/jira/browse/CASSANDRA-3578 Project: Cassandra Issue Type: Improvement Reporter: Jonathan Ellis Assignee: Vijay Priority: Minor Labels: performance Attachments: 0001-CASSANDRA-3578.patch, ComitlogStress.java, Current-CL.png, Multi-Threded-CL.png, parallel_commit_log_2.patch Brian Aker pointed out a while ago that allowing multiple threads to modify the commitlog simultaneously (reserving space for each with a CAS first, the way we do in the SlabAllocator.Region.allocate) can improve performance, since you're not bottlenecking on a single thread to do all the copying and CRC computation. Now that we use mmap'd CommitLog segments (CASSANDRA-3411) this becomes doable. (moved from CASSANDRA-622, which was getting a bit muddled.) -- This message was sent by Atlassian JIRA (v6.1#6144)
[2/2] git commit: Merge branch 'cassandra-1.2' into cassandra-2.0
Merge branch 'cassandra-1.2' into cassandra-2.0 Conflicts: CHANGES.txt bin/cqlsh Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/dfb97657 Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/dfb97657 Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/dfb97657 Branch: refs/heads/cassandra-2.0 Commit: dfb976573dce07f856c7018c4b6f9b15dc604776 Parents: e7ec65f 12413ad Author: Aleksey Yeschenko alek...@apache.org Authored: Tue Oct 22 14:29:54 2013 +0800 Committer: Aleksey Yeschenko alek...@apache.org Committed: Tue Oct 22 14:29:54 2013 +0800 -- CHANGES.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) -- http://git-wip-us.apache.org/repos/asf/cassandra/blob/dfb97657/CHANGES.txt -- diff --cc CHANGES.txt index 40d752c,4d07d78..e89ca41 --- a/CHANGES.txt +++ b/CHANGES.txt @@@ -1,34 -1,10 +1,33 @@@ -1.2.12 +2.0.2 + * Update FailureDetector to use nanontime (CASSANDRA-4925) + * Fix FileCacheService regressions (CASSANDRA-6149) + * Never return WriteTimeout for CL.ANY (CASSANDRA-6032) + * Fix race conditions in bulk loader (CASSANDRA-6129) + * Add configurable metrics reporting (CASSANDRA-4430) + * drop queries exceeding a configurable number of tombstones (CASSANDRA-6117) + * Track and persist sstable read activity (CASSANDRA-5515) + * Fixes for speculative retry (CASSANDRA-5932) + * Improve memory usage of metadata min/max column names (CASSANDRA-6077) + * Fix thrift validation refusing row markers on CQL3 tables (CASSANDRA-6081) + * Fix insertion of collections with CAS (CASSANDRA-6069) + * Correctly send metadata on SELECT COUNT (CASSANDRA-6080) + * Track clients' remote addresses in ClientState (CASSANDRA-6070) + * Create snapshot dir if it does not exist when migrating + leveled manifest (CASSANDRA-6093) + * make sequential nodetool repair the default (CASSANDRA-5950) + * Add more hooks for compaction strategy implementations (CASSANDRA-6111) + * Fix potential NPE on 
composite 2ndary indexes (CASSANDRA-6098) + * Delete can potentially be skipped in batch (CASSANDRA-6115) + * Allow alter keyspace on system_traces (CASSANDRA-6016) + * Disallow empty column names in cql (CASSANDRA-6136) + * Use Java7 file-handling APIs and fix file moving on Windows (CASSANDRA-5383) + * Save compaction history to system keyspace (CASSANDRA-5078) + * Fix NPE if StorageService.getOperationMode() is executed before full startup (CASSANDRA-6166) + * CQL3: support pre-epoch longs for TimestampType (CASSANDRA-6212) + * Add reloadtriggers command to nodetool (CASSANDRA-4949) + * cqlsh: ignore empty 'value alias' in DESCRIBE (CASSANDRA-6139) - * cqlsh: fix CREATE/ALTER WITH completion (CASSANDRA-6196) +Merged from 1.2: * (Hadoop) Require CFRR batchSize to be at least 2 (CASSANDRA-6114) - * Fix altering column types (CASSANDRA-6185) - * cqlsh: fix CREATE/ALTER WITH completion (CASSANDRA-6196) - - -1.2.11 * Add a warning for small LCS sstable size (CASSANDRA-6191) * Add ability to list specific KS/CF combinations in nodetool cfstats (CASSANDRA-4191) * Mark CF clean if a mutation raced the drop and got it marked dirty @@@ -62,44 -37,9 +61,45 @@@ * Fix validation of empty column names for compact tables (CASSANDRA-6152) * Skip replaying mutations that pass CRC but fail to deserialize (CASSANDRA-6183) * Rework token replacement to use replace_address (CASSANDRA-5916) + * Fix altering column types (CASSANDRA-6185) ++ * cqlsh: fix CREATE/ALTER WITH completion (CASSANDRA-6196) -1.2.10 +2.0.1 + * Fix bug that could allow reading deleted data temporarily (CASSANDRA-6025) + * Improve memory use defaults (CASSANDRA-5069) + * Make ThriftServer more easlly extensible (CASSANDRA-6058) + * Remove Hadoop dependency from ITransportFactory (CASSANDRA-6062) + * add file_cache_size_in_mb setting (CASSANDRA-5661) + * Improve error message when yaml contains invalid properties (CASSANDRA-5958) + * Improve leveled compaction's ability to find non-overlapping L0 compactions + to 
work on concurrently (CASSANDRA-5921) + * Notify indexer of columns shadowed by range tombstones (CASSANDRA-5614) + * Log Merkle tree stats (CASSANDRA-2698) + * Switch from crc32 to adler32 for compressed sstable checksums (CASSANDRA-5862) + * Improve offheap memcpy performance (CASSANDRA-5884) + * Use a range aware scanner for cleanup (CASSANDRA-2524) + * Cleanup doesn't need to inspect sstables that contain only local data + (CASSANDRA-5722) + * Add ability for CQL3 to list partition keys (CASSANDRA-4536) + * Improve native protocol serialization (CASSANDRA-5664) + * Upgrade Thrift to 0.9.1 (CASSANDRA-5923) + * Require superuser status for adding triggers
[1/2] git commit: Backport CASSANDRA-6196 to 1.2
Updated Branches: refs/heads/cassandra-2.0 e7ec65f66 - dfb976573 Backport CASSANDRA-6196 to 1.2 patch by Mikhail Stepura; reviewed by Aleksey Yeschenko for CASSANDRA-6196 Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/12413ad1 Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/12413ad1 Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/12413ad1 Branch: refs/heads/cassandra-2.0 Commit: 12413ad1f848aacb3314d279fb4db7e2b293d066 Parents: a9b403e Author: Aleksey Yeschenko alek...@apache.org Authored: Tue Oct 22 14:21:38 2013 +0800 Committer: Aleksey Yeschenko alek...@apache.org Committed: Tue Oct 22 14:22:13 2013 +0800 -- CHANGES.txt| 1 + bin/cqlsh | 2 +- pylib/cqlshlib/cql3handling.py | 88 + 3 files changed, 3 insertions(+), 88 deletions(-) -- http://git-wip-us.apache.org/repos/asf/cassandra/blob/12413ad1/CHANGES.txt -- diff --git a/CHANGES.txt b/CHANGES.txt index 117a200..4d07d78 100644 --- a/CHANGES.txt +++ b/CHANGES.txt @@ -1,6 +1,7 @@ 1.2.12 * (Hadoop) Require CFRR batchSize to be at least 2 (CASSANDRA-6114) * Fix altering column types (CASSANDRA-6185) + * cqlsh: fix CREATE/ALTER WITH completion (CASSANDRA-6196) 1.2.11 http://git-wip-us.apache.org/repos/asf/cassandra/blob/12413ad1/bin/cqlsh -- diff --git a/bin/cqlsh b/bin/cqlsh index 64e9b6f..5ccab3d 100755 --- a/bin/cqlsh +++ b/bin/cqlsh @@ -32,7 +32,7 @@ exit 1 from __future__ import with_statement description = CQL Shell for Apache Cassandra -version = 3.1.7 +version = 3.1.8 from StringIO import StringIO from itertools import groupby http://git-wip-us.apache.org/repos/asf/cassandra/blob/12413ad1/pylib/cqlshlib/cql3handling.py -- diff --git a/pylib/cqlshlib/cql3handling.py b/pylib/cqlshlib/cql3handling.py index c5b449f..b9265fd 100644 --- a/pylib/cqlshlib/cql3handling.py +++ b/pylib/cqlshlib/cql3handling.py @@ -1017,92 +1017,6 @@ def create_ks_wat_completer(ctxt, cass): return ['KEYSPACE'] return ['KEYSPACE', 
'SCHEMA'] -@completer_for('oldPropSpec', 'optname') -def create_ks_opt_completer(ctxt, cass): -exist_opts = ctxt.get_binding('optname', ()) -try: -stratopt = exist_opts.index('strategy_class') -except ValueError: -return ['strategy_class ='] -vals = ctxt.get_binding('optval') -stratclass = dequote_value(vals[stratopt]) -if stratclass in CqlRuleSet.replication_factor_strategies: -return ['strategy_options:replication_factor ='] -return [Hint('strategy_option_name')] - -@completer_for('oldPropSpec', 'optval') -def create_ks_optval_completer(ctxt, cass): -exist_opts = ctxt.get_binding('optname', (None,)) -if exist_opts[-1] == 'strategy_class': -return map(escape_value, CqlRuleSet.replication_strategies) -return [Hint('option_value')] - -@completer_for('newPropSpec', 'propname') -def keyspace_properties_option_name_completer(ctxt, cass): -optsseen = ctxt.get_binding('propname', ()) -if 'replication' not in optsseen: -return ['replication'] -return [durable_writes] - -@completer_for('propertyValue', 'propsimpleval') -def property_value_completer(ctxt, cass): -optname = ctxt.get_binding('propname')[-1] -if optname == 'durable_writes': -return ['true', 'false'] -if optname == 'replication': -return [{'class': '] -return () - -@completer_for('propertyValue', 'propmapkey') -def keyspace_properties_map_key_completer(ctxt, cass): -optname = ctxt.get_binding('propname')[-1] -if optname != 'replication': -return () -keysseen = map(dequote_value, ctxt.get_binding('propmapkey', ())) -valsseen = map(dequote_value, ctxt.get_binding('propmapval', ())) -for k, v in zip(keysseen, valsseen): -if k == 'class': -repclass = v -break -else: -return ['class'] -if repclass in CqlRuleSet.replication_factor_strategies: -opts = set(('replication_factor',)) -elif repclass == 'NetworkTopologyStrategy': -return [Hint('dc_name')] -return map(escape_value, opts.difference(keysseen)) - -@completer_for('propertyValue', 'propmapval') -def keyspace_properties_map_value_completer(ctxt, cass): -optname = 
ctxt.get_binding('propname')[-1] -if optname != 'replication': -return () -currentkey = dequote_value(ctxt.get_binding('propmapkey')[-1]) -if currentkey == 'class': -return map(escape_value, CqlRuleSet.replication_strategies) -return [Hint('value')] -
[3/3] git commit: Merge branch 'cassandra-2.0' into trunk
Merge branch 'cassandra-2.0' into trunk Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/0bff97a2 Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/0bff97a2 Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/0bff97a2 Branch: refs/heads/trunk Commit: 0bff97a2ca5ffd8d6168b7be424cc05fe9039830 Parents: db9bc69 dfb9765 Author: Aleksey Yeschenko alek...@apache.org Authored: Tue Oct 22 14:30:28 2013 +0800 Committer: Aleksey Yeschenko alek...@apache.org Committed: Tue Oct 22 14:30:28 2013 +0800 -- CHANGES.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) -- http://git-wip-us.apache.org/repos/asf/cassandra/blob/0bff97a2/CHANGES.txt --
[1/3] git commit: Backport CASSANDRA-6196 to 1.2
Updated Branches: refs/heads/trunk db9bc6929 - 0bff97a2c Backport CASSANDRA-6196 to 1.2 patch by Mikhail Stepura; reviewed by Aleksey Yeschenko for CASSANDRA-6196 Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/12413ad1 Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/12413ad1 Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/12413ad1 Branch: refs/heads/trunk Commit: 12413ad1f848aacb3314d279fb4db7e2b293d066 Parents: a9b403e Author: Aleksey Yeschenko alek...@apache.org Authored: Tue Oct 22 14:21:38 2013 +0800 Committer: Aleksey Yeschenko alek...@apache.org Committed: Tue Oct 22 14:22:13 2013 +0800 -- CHANGES.txt| 1 + bin/cqlsh | 2 +- pylib/cqlshlib/cql3handling.py | 88 + 3 files changed, 3 insertions(+), 88 deletions(-) -- http://git-wip-us.apache.org/repos/asf/cassandra/blob/12413ad1/CHANGES.txt -- diff --git a/CHANGES.txt b/CHANGES.txt index 117a200..4d07d78 100644 --- a/CHANGES.txt +++ b/CHANGES.txt @@ -1,6 +1,7 @@ 1.2.12 * (Hadoop) Require CFRR batchSize to be at least 2 (CASSANDRA-6114) * Fix altering column types (CASSANDRA-6185) + * cqlsh: fix CREATE/ALTER WITH completion (CASSANDRA-6196) 1.2.11 http://git-wip-us.apache.org/repos/asf/cassandra/blob/12413ad1/bin/cqlsh -- diff --git a/bin/cqlsh b/bin/cqlsh index 64e9b6f..5ccab3d 100755 --- a/bin/cqlsh +++ b/bin/cqlsh @@ -32,7 +32,7 @@ exit 1 from __future__ import with_statement description = CQL Shell for Apache Cassandra -version = 3.1.7 +version = 3.1.8 from StringIO import StringIO from itertools import groupby http://git-wip-us.apache.org/repos/asf/cassandra/blob/12413ad1/pylib/cqlshlib/cql3handling.py -- diff --git a/pylib/cqlshlib/cql3handling.py b/pylib/cqlshlib/cql3handling.py index c5b449f..b9265fd 100644 --- a/pylib/cqlshlib/cql3handling.py +++ b/pylib/cqlshlib/cql3handling.py @@ -1017,92 +1017,6 @@ def create_ks_wat_completer(ctxt, cass): return ['KEYSPACE'] return ['KEYSPACE', 'SCHEMA'] 
-@completer_for('oldPropSpec', 'optname') -def create_ks_opt_completer(ctxt, cass): -exist_opts = ctxt.get_binding('optname', ()) -try: -stratopt = exist_opts.index('strategy_class') -except ValueError: -return ['strategy_class ='] -vals = ctxt.get_binding('optval') -stratclass = dequote_value(vals[stratopt]) -if stratclass in CqlRuleSet.replication_factor_strategies: -return ['strategy_options:replication_factor ='] -return [Hint('strategy_option_name')] - -@completer_for('oldPropSpec', 'optval') -def create_ks_optval_completer(ctxt, cass): -exist_opts = ctxt.get_binding('optname', (None,)) -if exist_opts[-1] == 'strategy_class': -return map(escape_value, CqlRuleSet.replication_strategies) -return [Hint('option_value')] - -@completer_for('newPropSpec', 'propname') -def keyspace_properties_option_name_completer(ctxt, cass): -optsseen = ctxt.get_binding('propname', ()) -if 'replication' not in optsseen: -return ['replication'] -return [durable_writes] - -@completer_for('propertyValue', 'propsimpleval') -def property_value_completer(ctxt, cass): -optname = ctxt.get_binding('propname')[-1] -if optname == 'durable_writes': -return ['true', 'false'] -if optname == 'replication': -return [{'class': '] -return () - -@completer_for('propertyValue', 'propmapkey') -def keyspace_properties_map_key_completer(ctxt, cass): -optname = ctxt.get_binding('propname')[-1] -if optname != 'replication': -return () -keysseen = map(dequote_value, ctxt.get_binding('propmapkey', ())) -valsseen = map(dequote_value, ctxt.get_binding('propmapval', ())) -for k, v in zip(keysseen, valsseen): -if k == 'class': -repclass = v -break -else: -return ['class'] -if repclass in CqlRuleSet.replication_factor_strategies: -opts = set(('replication_factor',)) -elif repclass == 'NetworkTopologyStrategy': -return [Hint('dc_name')] -return map(escape_value, opts.difference(keysseen)) - -@completer_for('propertyValue', 'propmapval') -def keyspace_properties_map_value_completer(ctxt, cass): -optname = 
ctxt.get_binding('propname')[-1] -if optname != 'replication': -return () -currentkey = dequote_value(ctxt.get_binding('propmapkey')[-1]) -if currentkey == 'class': -return map(escape_value, CqlRuleSet.replication_strategies) -return [Hint('value')] - -@completer_for('propertyValue', 'ender')
[jira] [Commented] (CASSANDRA-3578) Multithreaded commitlog
[ https://issues.apache.org/jira/browse/CASSANDRA-3578?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanelfocusedCommentId=13801536#comment-13801536 ] Vijay commented on CASSANDRA-3578: -- {quote} It slows down the mutation thread by waiting for commitlog writing mutation is done {quote} Well, it depends on where you are bottlenecking: updating the mmap buffer is not that expensive and it's usually CPU intensive; on the other hand it reduces the variability, as shown in the stress. Multithreaded commitlog --- Key: CASSANDRA-3578 URL: https://issues.apache.org/jira/browse/CASSANDRA-3578 Project: Cassandra Issue Type: Improvement Reporter: Jonathan Ellis Assignee: Vijay Priority: Minor Labels: performance Attachments: 0001-CASSANDRA-3578.patch, ComitlogStress.java, Current-CL.png, Multi-Threded-CL.png, parallel_commit_log_2.patch Brian Aker pointed out a while ago that allowing multiple threads to modify the commitlog simultaneously (reserving space for each with a CAS first, the way we do in the SlabAllocator.Region.allocate) can improve performance, since you're not bottlenecking on a single thread to do all the copying and CRC computation. Now that we use mmap'd CommitLog segments (CASSANDRA-3411) this becomes doable. (moved from CASSANDRA-622, which was getting a bit muddled.) -- This message was sent by Atlassian JIRA (v6.1#6144)
[2/3] git commit: Merge branch 'cassandra-1.2' into cassandra-2.0
Merge branch 'cassandra-1.2' into cassandra-2.0 Conflicts: CHANGES.txt bin/cqlsh Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/dfb97657 Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/dfb97657 Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/dfb97657 Branch: refs/heads/trunk Commit: dfb976573dce07f856c7018c4b6f9b15dc604776 Parents: e7ec65f 12413ad Author: Aleksey Yeschenko alek...@apache.org Authored: Tue Oct 22 14:29:54 2013 +0800 Committer: Aleksey Yeschenko alek...@apache.org Committed: Tue Oct 22 14:29:54 2013 +0800 -- CHANGES.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) -- http://git-wip-us.apache.org/repos/asf/cassandra/blob/dfb97657/CHANGES.txt -- diff --cc CHANGES.txt index 40d752c,4d07d78..e89ca41 --- a/CHANGES.txt +++ b/CHANGES.txt @@@ -1,34 -1,10 +1,33 @@@ -1.2.12 +2.0.2 + * Update FailureDetector to use nanontime (CASSANDRA-4925) + * Fix FileCacheService regressions (CASSANDRA-6149) + * Never return WriteTimeout for CL.ANY (CASSANDRA-6032) + * Fix race conditions in bulk loader (CASSANDRA-6129) + * Add configurable metrics reporting (CASSANDRA-4430) + * drop queries exceeding a configurable number of tombstones (CASSANDRA-6117) + * Track and persist sstable read activity (CASSANDRA-5515) + * Fixes for speculative retry (CASSANDRA-5932) + * Improve memory usage of metadata min/max column names (CASSANDRA-6077) + * Fix thrift validation refusing row markers on CQL3 tables (CASSANDRA-6081) + * Fix insertion of collections with CAS (CASSANDRA-6069) + * Correctly send metadata on SELECT COUNT (CASSANDRA-6080) + * Track clients' remote addresses in ClientState (CASSANDRA-6070) + * Create snapshot dir if it does not exist when migrating + leveled manifest (CASSANDRA-6093) + * make sequential nodetool repair the default (CASSANDRA-5950) + * Add more hooks for compaction strategy implementations (CASSANDRA-6111) + * Fix potential NPE on composite 
2ndary indexes (CASSANDRA-6098) + * Delete can potentially be skipped in batch (CASSANDRA-6115) + * Allow alter keyspace on system_traces (CASSANDRA-6016) + * Disallow empty column names in cql (CASSANDRA-6136) + * Use Java7 file-handling APIs and fix file moving on Windows (CASSANDRA-5383) + * Save compaction history to system keyspace (CASSANDRA-5078) + * Fix NPE if StorageService.getOperationMode() is executed before full startup (CASSANDRA-6166) + * CQL3: support pre-epoch longs for TimestampType (CASSANDRA-6212) + * Add reloadtriggers command to nodetool (CASSANDRA-4949) + * cqlsh: ignore empty 'value alias' in DESCRIBE (CASSANDRA-6139) - * cqlsh: fix CREATE/ALTER WITH completion (CASSANDRA-6196) +Merged from 1.2: * (Hadoop) Require CFRR batchSize to be at least 2 (CASSANDRA-6114) - * Fix altering column types (CASSANDRA-6185) - * cqlsh: fix CREATE/ALTER WITH completion (CASSANDRA-6196) - - -1.2.11 * Add a warning for small LCS sstable size (CASSANDRA-6191) * Add ability to list specific KS/CF combinations in nodetool cfstats (CASSANDRA-4191) * Mark CF clean if a mutation raced the drop and got it marked dirty @@@ -62,44 -37,9 +61,45 @@@ * Fix validation of empty column names for compact tables (CASSANDRA-6152) * Skip replaying mutations that pass CRC but fail to deserialize (CASSANDRA-6183) * Rework token replacement to use replace_address (CASSANDRA-5916) + * Fix altering column types (CASSANDRA-6185) ++ * cqlsh: fix CREATE/ALTER WITH completion (CASSANDRA-6196) -1.2.10 +2.0.1 + * Fix bug that could allow reading deleted data temporarily (CASSANDRA-6025) + * Improve memory use defaults (CASSANDRA-5069) + * Make ThriftServer more easlly extensible (CASSANDRA-6058) + * Remove Hadoop dependency from ITransportFactory (CASSANDRA-6062) + * add file_cache_size_in_mb setting (CASSANDRA-5661) + * Improve error message when yaml contains invalid properties (CASSANDRA-5958) + * Improve leveled compaction's ability to find non-overlapping L0 compactions + to work on 
concurrently (CASSANDRA-5921) + * Notify indexer of columns shadowed by range tombstones (CASSANDRA-5614) + * Log Merkle tree stats (CASSANDRA-2698) + * Switch from crc32 to adler32 for compressed sstable checksums (CASSANDRA-5862) + * Improve offheap memcpy performance (CASSANDRA-5884) + * Use a range aware scanner for cleanup (CASSANDRA-2524) + * Cleanup doesn't need to inspect sstables that contain only local data + (CASSANDRA-5722) + * Add ability for CQL3 to list partition keys (CASSANDRA-4536) + * Improve native protocol serialization (CASSANDRA-5664) + * Upgrade Thrift to 0.9.1 (CASSANDRA-5923) + * Require superuser status for adding triggers (CASSANDRA-5963)
[jira] [Updated] (CASSANDRA-6196) Add compaction, compression to cqlsh tab completion for CREATE TABLE
[ https://issues.apache.org/jira/browse/CASSANDRA-6196?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Aleksey Yeschenko updated CASSANDRA-6196: - Fix Version/s: 1.2.12 Add compaction, compression to cqlsh tab completion for CREATE TABLE Key: CASSANDRA-6196 URL: https://issues.apache.org/jira/browse/CASSANDRA-6196 Project: Cassandra Issue Type: Bug Components: Tools Reporter: Jonathan Ellis Assignee: Mikhail Stepura Priority: Minor Fix For: 1.2.12, 2.0.2 Attachments: cassandra-1.2-6196.patch, cassandra-2.0-6196.patch -- This message was sent by Atlassian JIRA (v6.1#6144)
[jira] [Commented] (CASSANDRA-6196) Add compaction, compression to cqlsh tab completion for CREATE TABLE
[ https://issues.apache.org/jira/browse/CASSANDRA-6196?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanelfocusedCommentId=13801537#comment-13801537 ] Aleksey Yeschenko commented on CASSANDRA-6196: -- Committed the back port, thanks. Add compaction, compression to cqlsh tab completion for CREATE TABLE Key: CASSANDRA-6196 URL: https://issues.apache.org/jira/browse/CASSANDRA-6196 Project: Cassandra Issue Type: Bug Components: Tools Reporter: Jonathan Ellis Assignee: Mikhail Stepura Priority: Minor Fix For: 1.2.12, 2.0.2 Attachments: cassandra-1.2-6196.patch, cassandra-2.0-6196.patch -- This message was sent by Atlassian JIRA (v6.1#6144)
[jira] [Commented] (CASSANDRA-6142) Remove multithreaded compaction
[ https://issues.apache.org/jira/browse/CASSANDRA-6142?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanelfocusedCommentId=13801548#comment-13801548 ] Marcus Eriksson commented on CASSANDRA-6142: looks good to me regarding saveOutOfOrderRows, i guess a solution would be to flush a new sstable from the TreeSet when its size exceeds some limit? Unsure how common this is though. Remove multithreaded compaction --- Key: CASSANDRA-6142 URL: https://issues.apache.org/jira/browse/CASSANDRA-6142 Project: Cassandra Issue Type: Bug Components: Core Reporter: Jonathan Ellis Assignee: Jonathan Ellis Priority: Minor Fix For: 2.1 There is at best a very small sweet spot for multithreaded compaction (ParallelCompactionIterable). For large rows, we stall the pipeline and fall back to a single LCR pass. For small rows, the overhead of the coordination outweighs the benefits of parallelization (45s to compact 2x1M stress rows with multithreading enabled, vs 35 with it disabled). -- This message was sent by Atlassian JIRA (v6.1#6144)
[jira] [Updated] (CASSANDRA-6202) Add a LOCAL_ONE consistency level
[ https://issues.apache.org/jira/browse/CASSANDRA-6202?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Cyril Scetbon updated CASSANDRA-6202: - Fix Version/s: (was: 1.2.12) 1.2.11 Add a LOCAL_ONE consistency level - Key: CASSANDRA-6202 URL: https://issues.apache.org/jira/browse/CASSANDRA-6202 Project: Cassandra Issue Type: New Feature Components: Core Reporter: Brandon Williams Assignee: Jason Brown Labels: core, thrift Fix For: 1.2.11 Attachments: 6202_1.2-v2.diff, 6202_2.0-v2.diff, 6202-v1.diff There are an increasing amount of use cases where ONE is desirable, but cross-DC traffic is not. LOCAL_ONE would solves this by reading at ONE, but only within the DC the coordinator is in. -- This message was sent by Atlassian JIRA (v6.1#6144)
[jira] [Updated] (CASSANDRA-6181) Replaying a commit led to java.lang.StackOverflowError and node crash
[ https://issues.apache.org/jira/browse/CASSANDRA-6181?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Sylvain Lebresne updated CASSANDRA-6181: Attachment: 6181.txt I unfortunately haven't been able to reproduce with the commit log from Jeffrey. That being said, looking at the stacktrace more closely, I don't think that this is an infinite loop. Rather, in some insertion cases, we have to iterate over all (or a large part) of the range tombstones and that is currently done recursively so this can blow up the stack. The blow-up does reproduce rather easily in a unit test (with 3K range tombstones, which is not small, but not all that much). I thought we would be unlikely to run into that case with the way range tombstones are used in practice, but I suppose that's still possible if you have multiple clustering columns so maybe that's just that. Anyway, I don't really see another fix than to rewrite the logic non-recursively. Attaching a patch for this. This is probably a little bit more involved than what I'd like to push in 1.2 at this point, but at the same time I don't think there is any simpler way to fix this. On the bright side, RangeTombstoneList is relatively well covered by unit tests. [~exabytes18], [~jdamick]: If you guys could check that the attached patch does fix this for you, that would be awesome. Replaying a commit led to java.lang.StackOverflowError and node crash - Key: CASSANDRA-6181 URL: https://issues.apache.org/jira/browse/CASSANDRA-6181 Project: Cassandra Issue Type: Bug Environment: 1.2.8 1.2.10 - ubuntu 12.04 Reporter: Jeffrey Damick Assignee: Sylvain Lebresne Priority: Critical Fix For: 1.2.12 Attachments: 6181.txt 2 of our nodes died after attempting to replay a commit. I can attach the commit log file if that helps. It was occurring on 1.2.8, after several failed attempts to start, we attempted startup with 1.2.10. This also yielded the same issue (below). 
The only resolution was to physically move the commit log file out of the way and then the nodes were able to start... The replication factor was 3 so I'm hoping there was no data loss... {code} INFO [main] 2013-10-11 14:50:35,891 CommitLogReplayer.java (line 119) Replaying /ebs/cassandra/commitlog/CommitLog-2-1377542389560.log ERROR [MutationStage:18] 2013-10-11 14:50:37,387 CassandraDaemon.java (line 191) Exception in thread Thread[MutationStage:18,5,main] java.lang.StackOverflowError at org.apache.cassandra.db.marshal.TimeUUIDType.compareTimestampBytes(TimeUUIDType.java:68) at org.apache.cassandra.db.marshal.TimeUUIDType.compare(TimeUUIDType.java:57) at org.apache.cassandra.db.marshal.TimeUUIDType.compare(TimeUUIDType.java:29) at org.apache.cassandra.db.marshal.AbstractType.compareCollectionMembers(AbstractType.java:229) at org.apache.cassandra.db.marshal.AbstractCompositeType.compare(AbstractCompositeType.java:81) at org.apache.cassandra.db.marshal.AbstractCompositeType.compare(AbstractCompositeType.java:31) at org.apache.cassandra.db.RangeTombstoneList.insertAfter(RangeTombstoneList.java:439) at org.apache.cassandra.db.RangeTombstoneList.insertFrom(RangeTombstoneList.java:405) at org.apache.cassandra.db.RangeTombstoneList.weakInsertFrom(RangeTombstoneList.java:472) at org.apache.cassandra.db.RangeTombstoneList.insertAfter(RangeTombstoneList.java:456) at org.apache.cassandra.db.RangeTombstoneList.insertFrom(RangeTombstoneList.java:405) at org.apache.cassandra.db.RangeTombstoneList.weakInsertFrom(RangeTombstoneList.java:472) at org.apache.cassandra.db.RangeTombstoneList.insertAfter(RangeTombstoneList.java:456) at org.apache.cassandra.db.RangeTombstoneList.insertFrom(RangeTombstoneList.java:405) at org.apache.cassandra.db.RangeTombstoneList.weakInsertFrom(RangeTombstoneList.java:472) etc over and over until at org.apache.cassandra.db.RangeTombstoneList.weakInsertFrom(RangeTombstoneList.java:472) at 
org.apache.cassandra.db.RangeTombstoneList.insertAfter(RangeTombstoneList.java:456) at org.apache.cassandra.db.RangeTombstoneList.insertFrom(RangeTombstoneList.java:405) at org.apache.cassandra.db.RangeTombstoneList.add(RangeTombstoneList.java:144) at org.apache.cassandra.db.RangeTombstoneList.addAll(RangeTombstoneList.java:186) at org.apache.cassandra.db.DeletionInfo.add(DeletionInfo.java:180) at org.apache.cassandra.db.AtomicSortedColumns.addAllWithSizeDelta(AtomicSortedColumns.java:197) at
[jira] [Commented] (CASSANDRA-6224) CQL3 Column family / tables disappear, get unconfigured columnfamily errors
[ https://issues.apache.org/jira/browse/CASSANDRA-6224?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanelfocusedCommentId=13801668#comment-13801668 ] Sylvain Lebresne commented on CASSANDRA-6224: - I'm afraid we'd need a proper way to reproduce to go on, as that stacktrace doesn't really tell us anything. And as far as I can tell, so far no-one else has reported having tables disappearing and that's hardly something that would go unnoticed if that happened. In particular, I'd suggest really checking that it's not a client side issue, like some code dropping the table by mistake. Or maybe the client not checking for schema agreement before trying to query (and thus potentially reaching a node that is not up to date on the schema). CQL3 Column family / tables disappear, get unconfigured columnfamily errors - Key: CASSANDRA-6224 URL: https://issues.apache.org/jira/browse/CASSANDRA-6224 Project: Cassandra Issue Type: Bug Components: API, Core Environment: Cassandra 2.0.1 Amazon AWS Ubuntu Single-node Reporter: Constance Eustace We're seeing CQL3 tables seemingly arbitrarily disappear. Need to repair for prod meant we reconstructed the affected schema before nodetool repairs or similar attempts were done. It seems to take a few days to appear. Volumes are not tremendously high yet... 
Caused by: java.sql.SQLSyntaxErrorException: InvalidRequestException(why:unconfigured columnfamily entity_hierarchydef) at org.apache.cassandra.cql.jdbc.CassandraPreparedStatement.init(CassandraPreparedStatement.java:103) ~[cassandra-jdbc-1.2.5.jar:na] at org.apache.cassandra.cql.jdbc.CassandraConnection.prepareStatement(CassandraConnection.java:388) ~[cassandra-jdbc-1.2.5.jar:na] at org.apache.cassandra.cql.jdbc.CassandraConnection.prepareStatement(CassandraConnection.java:372) ~[cassandra-jdbc-1.2.5.jar:na] at org.apache.cassandra.cql.jdbc.CassandraConnection.prepareStatement(CassandraConnection.java:50) ~[cassandra-jdbc-1.2.5.jar:na] at org.apache.commons.dbcp.DelegatingConnection.prepareStatement(DelegatingConnection.java:281) ~[commons-dbcp-1.3.jar:1.3] at org.apache.commons.dbcp.PoolingDataSource$PoolGuardConnectionWrapper.prepareStatement(PoolingDataSource.java:313) ~[commons-dbcp-1.3.jar:1.3] at com.bestbuy.contentsystems.cupcake.storage.cassandra.cqlentity.CQL.tool.CassPSC.createPreparedStatement(CassPSC.java:61) ~[ingest-storage-QA-SNAPSHOT.jar:QA-SNAPSHOT] at org.springframework.jdbc.core.JdbcTemplate.execute(JdbcTemplate.java:583) ~[spring-jdbc-3.2.4.RELEASE.jar:3.2.4.RELEASE] ... 
148 common frames omitted Caused by: org.apache.cassandra.thrift.InvalidRequestException: null at org.apache.cassandra.thrift.Cassandra$prepare_cql3_query_result.read(Cassandra.java:39567) ~[cassandra-thrift-1.2.8.jar:1.2.8] at org.apache.thrift.TServiceClient.receiveBase(TServiceClient.java:78) ~[libthrift-0.7.0.jar:0.7.0] at org.apache.cassandra.thrift.Cassandra$Client.recv_prepare_cql3_query(Cassandra.java:1625) ~[cassandra-thrift-1.2.8.jar:1.2.8] at org.apache.cassandra.thrift.Cassandra$Client.prepare_cql3_query(Cassandra.java:1611) ~[cassandra-thrift-1.2.8.jar:1.2.8] at org.apache.cassandra.cql.jdbc.CassandraConnection.prepare(CassandraConnection.java:517) ~[cassandra-jdbc-1.2.5.jar:na] at org.apache.cassandra.cql.jdbc.CassandraConnection.prepare(CassandraConnection.java:532) ~[cassandra-jdbc-1.2.5.jar:na] at org.apache.cassandra.cql.jdbc.CassandraPreparedStatement.init(CassandraPreparedStatement.java:96) ~[cassandra-jdbc-1.2.5.jar:na] -- This message was sent by Atlassian JIRA (v6.1#6144)
Git Push Summary
Updated Tags: refs/tags/1.2.11-tentative [deleted] 942444971
Git Push Summary
Updated Tags: refs/tags/cassandra-1.2.11 [created] d0627cc37
[jira] [Created] (CASSANDRA-6227) Add 'WHERE 0=1', 'WHERE false', in SQL-compatible manner
Sergey Nagaytsev created CASSANDRA-6227: --- Summary: Add 'WHERE 0=1', 'WHERE false', in SQL-compatible manner Key: CASSANDRA-6227 URL: https://issues.apache.org/jira/browse/CASSANDRA-6227 Project: Cassandra Issue Type: New Feature Environment: Yii2 PHP framework database abstraction level Reporter: Sergey Nagaytsev When DBAL thinks a query result must be empty, its deep inner method returns something like '0=1', 'FALSE' etc. for inclusion into the WHERE condition, to make the result set empty. None of this is recognised by Cassandra - instead, an error is returned. Please, implement 'false', 'null', constant comparison etc. as valid parts of WHERE with logic -- This message was sent by Atlassian JIRA (v6.1#6144)
svn commit: r1534597 - in /cassandra/site: publish/download/index.html src/settings.py
Author: slebresne Date: Tue Oct 22 10:48:28 2013 New Revision: 1534597 URL: http://svn.apache.org/r1534597 Log: Update website for 1.2.11 release Modified: cassandra/site/publish/download/index.html cassandra/site/src/settings.py Modified: cassandra/site/publish/download/index.html URL: http://svn.apache.org/viewvc/cassandra/site/publish/download/index.html?rev=1534597r1=1534596r2=1534597view=diff == --- cassandra/site/publish/download/index.html (original) +++ cassandra/site/publish/download/index.html Tue Oct 22 10:48:28 2013 @@ -102,16 +102,16 @@ p Previous stable branches of Cassandra continue to see periodic maintenance for some time after a new major release is made. The lastest release on the - 1.2 branch is 1.2.10 (released on - 2013-09-23). + 1.2 branch is 1.2.11 (released on + 2013-10-22). /p ul li -a class=filename href=http://www.apache.org/dyn/closer.cgi?path=/cassandra/1.2.10/apache-cassandra-1.2.10-bin.tar.gz;apache-cassandra-1.2.10-bin.tar.gz/a -[a href=http://www.apache.org/dist/cassandra/1.2.10/apache-cassandra-1.2.10-bin.tar.gz.asc;PGP/a] -[a href=http://www.apache.org/dist/cassandra/1.2.10/apache-cassandra-1.2.10-bin.tar.gz.md5;MD5/a] -[a href=http://www.apache.org/dist/cassandra/1.2.10/apache-cassandra-1.2.10-bin.tar.gz.sha1;SHA1/a] +a class=filename href=http://www.apache.org/dyn/closer.cgi?path=/cassandra/1.2.11/apache-cassandra-1.2.11-bin.tar.gz;apache-cassandra-1.2.11-bin.tar.gz/a +[a href=http://www.apache.org/dist/cassandra/1.2.11/apache-cassandra-1.2.11-bin.tar.gz.asc;PGP/a] +[a href=http://www.apache.org/dist/cassandra/1.2.11/apache-cassandra-1.2.11-bin.tar.gz.md5;MD5/a] +[a href=http://www.apache.org/dist/cassandra/1.2.11/apache-cassandra-1.2.11-bin.tar.gz.sha1;SHA1/a] /li /ul @@ -154,10 +154,10 @@ /li li -a class=filename href=http://www.apache.org/dyn/closer.cgi?path=/cassandra/1.2.10/apache-cassandra-1.2.10-src.tar.gz;apache-cassandra-1.2.10-src.tar.gz/a -[a 
href=http://www.apache.org/dist/cassandra/1.2.10/apache-cassandra-1.2.10-src.tar.gz.asc;PGP/a] -[a href=http://www.apache.org/dist/cassandra/1.2.10/apache-cassandra-1.2.10-src.tar.gz.md5;MD5/a] -[a href=http://www.apache.org/dist/cassandra/1.2.10/apache-cassandra-1.2.10-src.tar.gz.sha1;SHA1/a] +a class=filename href=http://www.apache.org/dyn/closer.cgi?path=/cassandra/1.2.11/apache-cassandra-1.2.11-src.tar.gz;apache-cassandra-1.2.11-src.tar.gz/a +[a href=http://www.apache.org/dist/cassandra/1.2.11/apache-cassandra-1.2.11-src.tar.gz.asc;PGP/a] +[a href=http://www.apache.org/dist/cassandra/1.2.11/apache-cassandra-1.2.11-src.tar.gz.md5;MD5/a] +[a href=http://www.apache.org/dist/cassandra/1.2.11/apache-cassandra-1.2.11-src.tar.gz.sha1;SHA1/a] /li Modified: cassandra/site/src/settings.py URL: http://svn.apache.org/viewvc/cassandra/site/src/settings.py?rev=1534597r1=1534596r2=1534597view=diff == --- cassandra/site/src/settings.py (original) +++ cassandra/site/src/settings.py Tue Oct 22 10:48:28 2013 @@ -92,8 +92,8 @@ SITE_POST_PROCESSORS = { } class CassandraDef(object): -oldstable_version = '1.2.10' -oldstable_release_date = '2013-09-23' +oldstable_version = '1.2.11' +oldstable_release_date = '2013-10-22' oldstable_exists = True veryoldstable_version = '1.1.12' veryoldstable_release_date = '2013-05-27'
[jira] [Commented] (CASSANDRA-6179) Load calculated in nodetool info is strange/inaccurate in JBOD setups
[ https://issues.apache.org/jira/browse/CASSANDRA-6179?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanelfocusedCommentId=13801738#comment-13801738 ] Jonathan Ellis commented on CASSANDRA-6179: --- Should we add snapshot space used to nodetool info to make this more obvious? Load calculated in nodetool info is strange/inaccurate in JBOD setups - Key: CASSANDRA-6179 URL: https://issues.apache.org/jira/browse/CASSANDRA-6179 Project: Cassandra Issue Type: Bug Environment: JBOD layouts Reporter: J. Ryan Earl Attachments: dump.txt We recently noticed that the storage capacity on Cassandra nodes using JBOD layout was returning what looks close to the average data volume size, instead of the sum of all JBOD data volumes. It's not exactly an average and I haven't had time to dig into the code to see what it's really doing, it's like some sort of sample of the JBOD volumes sizes. So looking at the JBOD volumes we see: {noformat} [jre@cassandra2 ~]$ df -h FilesystemSize Used Avail Use% Mounted on [...] /dev/sdc1 1.1T 9.4G 1.1T 1% /data/1 /dev/sdd1 1.1T 9.2G 1.1T 1% /data/2 /dev/sde1 1.1T 11G 1.1T 1% /data/3 /dev/sdf1 1.1T 11G 1.1T 1% /data/4 /dev/sdg1 1.1T 9.2G 1.1T 1% /data/5 /dev/sdh1 1.1T 11G 1.1T 1% /data/6 /dev/sdi1 1.1T 9.8G 1.1T 1% /data/7 {noformat} Looking at 'nodetool info' we see: {noformat} [jre@cassandra2 ~]$ nodetool info Token: (invoke with -T/--tokens to see all 256 tokens) ID : 631f0be3-ce52-4eb9-b48b-069fbfdf0a97 Gossip active: true Thrift active: true Native Transport active: true Load : 10.57 GB {noformat} So there are 7 disks in a JBOD configuration in this example, the sum should be closer to 70G for each node. Maybe we're misinterpreting what this value should be, but things like OpsCenter appear to use this load value as the size of data on the local node, which I expect to be the sum of JBOD volumes. -- This message was sent by Atlassian JIRA (v6.1#6144)
[jira] [Commented] (CASSANDRA-6181) Replaying a commit led to java.lang.StackOverflowError and node crash
[ https://issues.apache.org/jira/browse/CASSANDRA-6181?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanelfocusedCommentId=13801772#comment-13801772 ] Fabien Rousseau commented on CASSANDRA-6181: Don't know if this can help but, unit tests do not set -Xss parameter and uses the default value (1M in general, see http://www.oracle.com/technetwork/java/javase/tech/vmoptions-jsp-140102.html ) while default Xss is 180k for Cassandra1.2 Here are some raw numbers using unit tests and setting Xss in the build.xml : N = 193; // 180k N = 313; // 228k N = 394; // 256k N = 1036; // 512k N = 2321; // 1024k This number represents the number of inserted tombstones before the tests starts failing (ie : incrementing this number by one : tests fails with SOException) This should probably explain why it was not reproducible. By the way, maybe the unit tests should set the Xss parameter in order to be as close as possible of a running cassandra instance ? Replaying a commit led to java.lang.StackOverflowError and node crash - Key: CASSANDRA-6181 URL: https://issues.apache.org/jira/browse/CASSANDRA-6181 Project: Cassandra Issue Type: Bug Environment: 1.2.8 1.2.10 - ubuntu 12.04 Reporter: Jeffrey Damick Assignee: Sylvain Lebresne Priority: Critical Fix For: 1.2.12 Attachments: 6181.txt 2 of our nodes died after attempting to replay a commit. I can attach the commit log file if that helps. It was occurring on 1.2.8, after several failed attempts to start, we attempted startup with 1.2.10. This also yielded the same issue (below). The only resolution was to physically move the commit log file out of the way and then the nodes were able to start... The replication factor was 3 so I'm hoping there was no data loss... 
{code} INFO [main] 2013-10-11 14:50:35,891 CommitLogReplayer.java (line 119) Replaying /ebs/cassandra/commitlog/CommitLog-2-1377542389560.log ERROR [MutationStage:18] 2013-10-11 14:50:37,387 CassandraDaemon.java (line 191) Exception in thread Thread[MutationStage:18,5,main] java.lang.StackOverflowError at org.apache.cassandra.db.marshal.TimeUUIDType.compareTimestampBytes(TimeUUIDType.java:68) at org.apache.cassandra.db.marshal.TimeUUIDType.compare(TimeUUIDType.java:57) at org.apache.cassandra.db.marshal.TimeUUIDType.compare(TimeUUIDType.java:29) at org.apache.cassandra.db.marshal.AbstractType.compareCollectionMembers(AbstractType.java:229) at org.apache.cassandra.db.marshal.AbstractCompositeType.compare(AbstractCompositeType.java:81) at org.apache.cassandra.db.marshal.AbstractCompositeType.compare(AbstractCompositeType.java:31) at org.apache.cassandra.db.RangeTombstoneList.insertAfter(RangeTombstoneList.java:439) at org.apache.cassandra.db.RangeTombstoneList.insertFrom(RangeTombstoneList.java:405) at org.apache.cassandra.db.RangeTombstoneList.weakInsertFrom(RangeTombstoneList.java:472) at org.apache.cassandra.db.RangeTombstoneList.insertAfter(RangeTombstoneList.java:456) at org.apache.cassandra.db.RangeTombstoneList.insertFrom(RangeTombstoneList.java:405) at org.apache.cassandra.db.RangeTombstoneList.weakInsertFrom(RangeTombstoneList.java:472) at org.apache.cassandra.db.RangeTombstoneList.insertAfter(RangeTombstoneList.java:456) at org.apache.cassandra.db.RangeTombstoneList.insertFrom(RangeTombstoneList.java:405) at org.apache.cassandra.db.RangeTombstoneList.weakInsertFrom(RangeTombstoneList.java:472) etc over and over until at org.apache.cassandra.db.RangeTombstoneList.weakInsertFrom(RangeTombstoneList.java:472) at org.apache.cassandra.db.RangeTombstoneList.insertAfter(RangeTombstoneList.java:456) at org.apache.cassandra.db.RangeTombstoneList.insertFrom(RangeTombstoneList.java:405) at org.apache.cassandra.db.RangeTombstoneList.add(RangeTombstoneList.java:144) at 
org.apache.cassandra.db.RangeTombstoneList.addAll(RangeTombstoneList.java:186) at org.apache.cassandra.db.DeletionInfo.add(DeletionInfo.java:180) at org.apache.cassandra.db.AtomicSortedColumns.addAllWithSizeDelta(AtomicSortedColumns.java:197) at org.apache.cassandra.db.AbstractColumnContainer.addAllWithSizeDelta(AbstractColumnContainer.java:99) at org.apache.cassandra.db.Memtable.resolve(Memtable.java:207) at org.apache.cassandra.db.Memtable.put(Memtable.java:170) at org.apache.cassandra.db.ColumnFamilyStore.apply(ColumnFamilyStore.java:745) at org.apache.cassandra.db.Table.apply(Table.java:388) at org.apache.cassandra.db.Table.apply(Table.java:353)
[jira] [Updated] (CASSANDRA-6181) Replaying a commit led to java.lang.StackOverflowError and node crash
[ https://issues.apache.org/jira/browse/CASSANDRA-6181?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Jonathan Ellis updated CASSANDRA-6181: -- Reviewer: Fabien Rousseau Replaying a commit led to java.lang.StackOverflowError and node crash - Key: CASSANDRA-6181 URL: https://issues.apache.org/jira/browse/CASSANDRA-6181 Project: Cassandra Issue Type: Bug Environment: 1.2.8 1.2.10 - ubuntu 12.04 Reporter: Jeffrey Damick Assignee: Sylvain Lebresne Priority: Critical Fix For: 1.2.12 Attachments: 6181.txt 2 of our nodes died after attempting to replay a commit. I can attach the commit log file if that helps. It was occurring on 1.2.8, after several failed attempts to start, we attempted startup with 1.2.10. This also yielded the same issue (below). The only resolution was to physically move the commit log file out of the way and then the nodes were able to start... The replication factor was 3 so I'm hoping there was no data loss... {code} INFO [main] 2013-10-11 14:50:35,891 CommitLogReplayer.java (line 119) Replaying /ebs/cassandra/commitlog/CommitLog-2-1377542389560.log ERROR [MutationStage:18] 2013-10-11 14:50:37,387 CassandraDaemon.java (line 191) Exception in thread Thread[MutationStage:18,5,main] java.lang.StackOverflowError at org.apache.cassandra.db.marshal.TimeUUIDType.compareTimestampBytes(TimeUUIDType.java:68) at org.apache.cassandra.db.marshal.TimeUUIDType.compare(TimeUUIDType.java:57) at org.apache.cassandra.db.marshal.TimeUUIDType.compare(TimeUUIDType.java:29) at org.apache.cassandra.db.marshal.AbstractType.compareCollectionMembers(AbstractType.java:229) at org.apache.cassandra.db.marshal.AbstractCompositeType.compare(AbstractCompositeType.java:81) at org.apache.cassandra.db.marshal.AbstractCompositeType.compare(AbstractCompositeType.java:31) at org.apache.cassandra.db.RangeTombstoneList.insertAfter(RangeTombstoneList.java:439) at org.apache.cassandra.db.RangeTombstoneList.insertFrom(RangeTombstoneList.java:405) at 
org.apache.cassandra.db.RangeTombstoneList.weakInsertFrom(RangeTombstoneList.java:472) at org.apache.cassandra.db.RangeTombstoneList.insertAfter(RangeTombstoneList.java:456) at org.apache.cassandra.db.RangeTombstoneList.insertFrom(RangeTombstoneList.java:405) at org.apache.cassandra.db.RangeTombstoneList.weakInsertFrom(RangeTombstoneList.java:472) at org.apache.cassandra.db.RangeTombstoneList.insertAfter(RangeTombstoneList.java:456) at org.apache.cassandra.db.RangeTombstoneList.insertFrom(RangeTombstoneList.java:405) at org.apache.cassandra.db.RangeTombstoneList.weakInsertFrom(RangeTombstoneList.java:472) etc over and over until at org.apache.cassandra.db.RangeTombstoneList.weakInsertFrom(RangeTombstoneList.java:472) at org.apache.cassandra.db.RangeTombstoneList.insertAfter(RangeTombstoneList.java:456) at org.apache.cassandra.db.RangeTombstoneList.insertFrom(RangeTombstoneList.java:405) at org.apache.cassandra.db.RangeTombstoneList.add(RangeTombstoneList.java:144) at org.apache.cassandra.db.RangeTombstoneList.addAll(RangeTombstoneList.java:186) at org.apache.cassandra.db.DeletionInfo.add(DeletionInfo.java:180) at org.apache.cassandra.db.AtomicSortedColumns.addAllWithSizeDelta(AtomicSortedColumns.java:197) at org.apache.cassandra.db.AbstractColumnContainer.addAllWithSizeDelta(AbstractColumnContainer.java:99) at org.apache.cassandra.db.Memtable.resolve(Memtable.java:207) at org.apache.cassandra.db.Memtable.put(Memtable.java:170) at org.apache.cassandra.db.ColumnFamilyStore.apply(ColumnFamilyStore.java:745) at org.apache.cassandra.db.Table.apply(Table.java:388) at org.apache.cassandra.db.Table.apply(Table.java:353) at org.apache.cassandra.db.commitlog.CommitLogReplayer$1.runMayThrow(CommitLogReplayer.java:258) at org.apache.cassandra.utils.WrappedRunnable.run(WrappedRunnable.java:28) at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:471) at java.util.concurrent.FutureTask$Sync.innerRun(FutureTask.java:334) at 
java.util.concurrent.FutureTask.run(FutureTask.java:166) at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145) at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615) at java.lang.Thread.run(Thread.java:724) {code} -- This message was sent by Atlassian JIRA (v6.1#6144)
[jira] [Updated] (CASSANDRA-6206) Thrift socket listen backlog
[ https://issues.apache.org/jira/browse/CASSANDRA-6206?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Nenad Merdanovic updated CASSANDRA-6206: Attachment: (was: cassandra-v2.patch) Thrift socket listen backlog Key: CASSANDRA-6206 URL: https://issues.apache.org/jira/browse/CASSANDRA-6206 Project: Cassandra Issue Type: Bug Components: Core Environment: Debian Linux, Java 7 Reporter: Nenad Merdanovic Fix For: 2.0.2 Attachments: cassandra.patch Although Thrift is a deprecated method of accessing Cassandra, the default backlog is way too low on that socket. It shouldn't be a problem to implement it and I am including a POC patch for this (sorry, really low on time with limited Java knowledge so just to give an idea). This is an old report which was never addressed and the bug remains to this day, except in my case I have a much larger scale application with 3rd party software which I cannot modify to include connection pooling: https://issues.apache.org/jira/browse/CASSANDRA-1663 There is also a pending change in Thrift itself which Cassandra should be able to use for parts using TServerSocket (SSL): https://issues.apache.org/jira/browse/THRIFT-1868 -- This message was sent by Atlassian JIRA (v6.1#6144)
[jira] [Updated] (CASSANDRA-6206) Thrift socket listen backlog
[ https://issues.apache.org/jira/browse/CASSANDRA-6206?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Nenad Merdanovic updated CASSANDRA-6206: Attachment: cassandra-v2.patch It appears I mangled the patch file while making a simple change, sorry. I have uploaded the one that is good. Thrift socket listen backlog Key: CASSANDRA-6206 URL: https://issues.apache.org/jira/browse/CASSANDRA-6206 Project: Cassandra Issue Type: Bug Components: Core Environment: Debian Linux, Java 7 Reporter: Nenad Merdanovic Fix For: 2.0.2 Attachments: cassandra.patch, cassandra-v2.patch Although Thrift is a depreciated method of accessing Cassandra, default backlog is way too low on that socket. It shouldn't be a problem to implement it and I am including a POC patch for this (sorry, really low on time with limited Java knowledge so just to give an idea). This is an old report which was never addressed and the bug remains till this day, except in my case I have a much larger scale application with 3rd party software which I cannot modify to include connection pooling: https://issues.apache.org/jira/browse/CASSANDRA-1663 There is also a pending change in the Thrift itself which Cassandra should be able to use for parts using TServerSocket (SSL): https://issues.apache.org/jira/browse/THRIFT-1868 -- This message was sent by Atlassian JIRA (v6.1#6144)
[jira] [Updated] (CASSANDRA-6194) speculative retry can sometimes violate consistency
[ https://issues.apache.org/jira/browse/CASSANDRA-6194?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Jonathan Ellis updated CASSANDRA-6194: -- Attachment: 6194.txt Patch attached to wait for all the contacted replicas on DME. Not sure how this could cause the test failure though so it's kind of a shot in the dark. speculative retry can sometimes violate consistency --- Key: CASSANDRA-6194 URL: https://issues.apache.org/jira/browse/CASSANDRA-6194 Project: Cassandra Issue Type: Bug Components: Core Reporter: Brandon Williams Assignee: Jonathan Ellis Fix For: 2.0.2 Attachments: 6194.txt This is most evident with intermittent failures of the short_read dtests. I'll focus on short_read_reversed_test for explanation, since that's what I used to bisect. This test inserts some columns into a row, then deletes a subset, but it performs each delete on a different node, with another node down (hints are disabled.) Finally it reads the row back at QUORUM and checks that it doesn't see any deleted columns, however with speculative retry on this often fails. I bisected this to the change that made 99th percentile SR the default reliably by looping the test enough times at each iteration to be sure it was passing or failing. -- This message was sent by Atlassian JIRA (v6.1#6144)
[jira] [Commented] (CASSANDRA-6120) Boolean constants syntax is not consistent between DDL and DML in CQL
[ https://issues.apache.org/jira/browse/CASSANDRA-6120?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanelfocusedCommentId=13801829#comment-13801829 ] Sylvain Lebresne commented on CASSANDRA-6120: - Not really sure what to do here. The inconsistency is not just for boolean, DDL properties are not really type-checked due to being handled specifically by PropertyDefinitions. In particular, note that in the replication map, one of the value ('class') is a string while the other ('replication_factor') is an int, which is impossible in a DML. On that part, to be consistent, we'd need to force replication_factor to be a string, and I'm not sure that's a good idea because that would be uglier and more importantly would break too many people imho. Of course, It's still possible to type-check property values more thoroughly (while letting map values not all be of the same type) but we would still not have full consistency between DDL and DML on that front. It's also not a one-liner fix: it requires to refactor PropertyDefinitions, CFPropDefs and KSPropDefs relatively heavily (not that those classes are huge but still). And there's the backward compatibility issue. Overall, If we had released CQL3 a week ago, I'd said sure, let's clean it up. But with CQL3 being almost a year old, I'm leaning towards just leaving it the way it is. Other opinions? 
Boolean constants syntax is not consistent between DDL and DML in CQL - Key: CASSANDRA-6120 URL: https://issues.apache.org/jira/browse/CASSANDRA-6120 Project: Cassandra Issue Type: Bug Reporter: Michaël Figuière Assignee: Sylvain Lebresne Priority: Trivial DDL statements allow boolean constants to be either quoted or unquoted as: {code} CREATE KEYSPACE ks WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1} AND durable_writes = true; CREATE KEYSPACE ks WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1} AND durable_writes = 'true'; {code} While DML statements only allow unquoted boolean constants. While this is not a big deal, it can introduce a bit of confusion for the users. Fixing this lack of syntax consistency would break the existing scripts, so that's something we might want to consider next time we'll introduce some breaking changes in CQL... -- This message was sent by Atlassian JIRA (v6.1#6144)
git commit: Revert use of speculative retries by default for now
Updated Branches: refs/heads/cassandra-2.0 dfb976573 - 66fe5bdaa Revert use of speculative retries by default for now Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/66fe5bda Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/66fe5bda Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/66fe5bda Branch: refs/heads/cassandra-2.0 Commit: 66fe5bdaaa1f5e22d33528c3d2313cc23e7621d8 Parents: dfb9765 Author: Sylvain Lebresne sylv...@datastax.com Authored: Tue Oct 22 16:08:53 2013 +0200 Committer: Sylvain Lebresne sylv...@datastax.com Committed: Tue Oct 22 16:08:53 2013 +0200 -- NEWS.txt | 2 -- src/java/org/apache/cassandra/config/CFMetaData.java | 2 +- 2 files changed, 1 insertion(+), 3 deletions(-) -- http://git-wip-us.apache.org/repos/asf/cassandra/blob/66fe5bda/NEWS.txt -- diff --git a/NEWS.txt b/NEWS.txt index 69ab4fd..e39d5cd 100644 --- a/NEWS.txt +++ b/NEWS.txt @@ -19,8 +19,6 @@ using the provided 'sstableupgrade' tool. New features -- Speculative retry defaults to 99th percentile - (See blog post at http://www.datastax.com/dev/blog/rapid-read-protection-in-cassandra-2-0-2) - Configurable metrics reporting (see conf/metrics-reporter-config-sample.yaml) - Compaction history and stats are now saved to system keyspace http://git-wip-us.apache.org/repos/asf/cassandra/blob/66fe5bda/src/java/org/apache/cassandra/config/CFMetaData.java -- diff --git a/src/java/org/apache/cassandra/config/CFMetaData.java b/src/java/org/apache/cassandra/config/CFMetaData.java index d63ee01..479dafc 100644 --- a/src/java/org/apache/cassandra/config/CFMetaData.java +++ b/src/java/org/apache/cassandra/config/CFMetaData.java @@ -83,7 +83,7 @@ public final class CFMetaData public final static Class? 
extends AbstractCompactionStrategy DEFAULT_COMPACTION_STRATEGY_CLASS = SizeTieredCompactionStrategy.class; public final static Caching DEFAULT_CACHING_STRATEGY = Caching.KEYS_ONLY; public final static int DEFAULT_DEFAULT_TIME_TO_LIVE = 0; -public final static SpeculativeRetry DEFAULT_SPECULATIVE_RETRY = new SpeculativeRetry(SpeculativeRetry.RetryType.PERCENTILE, 0.99); +public final static SpeculativeRetry DEFAULT_SPECULATIVE_RETRY = new SpeculativeRetry(SpeculativeRetry.RetryType.NONE, 0); public final static int DEFAULT_INDEX_INTERVAL = 128; public final static boolean DEFAULT_POPULATE_IO_CACHE_ON_FLUSH = false;
[jira] [Created] (CASSANDRA-6228) Add view trace session to cqlsh
Jeremiah Jordan created CASSANDRA-6228: -- Summary: Add view trace session to cqlsh Key: CASSANDRA-6228 URL: https://issues.apache.org/jira/browse/CASSANDRA-6228 Project: Cassandra Issue Type: Improvement Components: Tools Reporter: Jeremiah Jordan Priority: Trivial It would be nice if cqlsh had a command to pass a tracing session id in, and have it print out the trace the same way it does when tracing is on. -- This message was sent by Atlassian JIRA (v6.1#6144)
[jira] [Assigned] (CASSANDRA-6228) Add view trace session to cqlsh
[ https://issues.apache.org/jira/browse/CASSANDRA-6228?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Jonathan Ellis reassigned CASSANDRA-6228: - Assignee: Mikhail Mazursky Add view trace session to cqlsh - Key: CASSANDRA-6228 URL: https://issues.apache.org/jira/browse/CASSANDRA-6228 Project: Cassandra Issue Type: Improvement Components: Tools Reporter: Jeremiah Jordan Assignee: Mikhail Mazursky Priority: Trivial It would be nice if cqlsh had a command to pass a tracing session id in, and have it print out the trace the same way it does when tracing is on. -- This message was sent by Atlassian JIRA (v6.1#6144)
[jira] [Commented] (CASSANDRA-6225) GCInspector should not wait after ConcurrentMarkSweep GC to flush memtables and reduce cache size
[ https://issues.apache.org/jira/browse/CASSANDRA-6225?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanelfocusedCommentId=13801887#comment-13801887 ] Billow Gao commented on CASSANDRA-6225: --- We tried to use {code} flush_largest_memtables_at = 1.0; reduce_cache_sizes_at = 1.0; {code} The heap usage remained at 100% in Cassandra 1.2.9. Just hang up there after we run stress write test for a while. GCInspector should not wait after ConcurrentMarkSweep GC to flush memtables and reduce cache size - Key: CASSANDRA-6225 URL: https://issues.apache.org/jira/browse/CASSANDRA-6225 Project: Cassandra Issue Type: Bug Components: Core Environment: Cassandra 1.2.9, SunOS, Java 7 Reporter: Billow Gao In GCInspector.logGCResults, cassandra won't flush memtables and reduce Cache Sizes until there is a ConcurrentMarkSweep GC. It caused a long pause on the service. And other nodes could mark it as DEAD. In our stress test, we were using 64 concurrent threads to write data to cassandra. The heap usage grew up quickly and reach to maximum. We saw several ConcurrentMarkSweep GCs which only freed very few rams until a memtable flush was called. The other nodes marked the node as DOWN when GC took more than 20 seconds. 
{code} INFO [ScheduledTasks:1] 2013-10-18 15:42:36,176 GCInspector.java (line 119) GC for ConcurrentMarkSweep: 27481 ms for 1 collections, 5229917848 used; max is 6358564864 INFO [ScheduledTasks:1] 2013-10-18 15:43:14,013 GCInspector.java (line 119) GC for ConcurrentMarkSweep: 27729 ms for 1 collections, 5381504752 used; max is 6358564864 INFO [ScheduledTasks:1] 2013-10-18 15:43:50,565 GCInspector.java (line 119) GC for ConcurrentMarkSweep: 29867 ms for 1 collections, 5479631256 used; max is 6358564864 INFO [ScheduledTasks:1] 2013-10-18 15:44:23,457 GCInspector.java (line 119) GC for ConcurrentMarkSweep: 28166 ms for 1 collections, 5545752344 used; max is 6358564864 INFO [ScheduledTasks:1] 2013-10-18 15:44:58,290 GCInspector.java (line 119) GC for ConcurrentMarkSweep: 29377 ms for 2 collections, 5343255456 used; max is 6358564864 {code} {code} INFO [GossipTasks:1] 2013-10-18 15:42:29,004 Gossiper.java (line 803) InetAddress /1.2.3.4 is now DOWN INFO [GossipTasks:1] 2013-10-18 15:43:06,901 Gossiper.java (line 803) InetAddress /1.2.3.4 is now DOWN INFO [GossipTasks:1] 2013-10-18 15:44:18,254 Gossiper.java (line 803) InetAddress /1.2.3.4 is now DOWN INFO [GossipTasks:1] 2013-10-18 15:44:48,507 Gossiper.java (line 803) InetAddress /1.2.3.4 is now DOWN INFO [GossipTasks:1] 2013-10-18 15:45:32,375 Gossiper.java (line 803) InetAddress /1.2.3.4 is now DOWN {code} We found two solutions to fix the long pause which result in a DOWN status. 1. We reduced the maximum ram to 3G. The behavior is the same, but gc was faster(under 20 seconds), so no nodes were marked as DOWN 2. Running a cronjob on the cassandra server which period call nodetool -h localhost flush. Flush after a full gc just make thing worse and waste time spent on GC. In a heavily load system, you would have several full GCs before a flush can finish. (a flush may take more than 30 seconds) Ideally, GCInspector should has a better logic on when to flush memtable. 1. 
Flush memtables/reduce cache size when heap usage reaches the threshold (smaller than the full gc threshold). 2. Prevent overly frequent flushes by remembering the last running time. If we call flush before a full gc, then the full gc will release the ram occupied by memtables, thus reducing the heap usage a lot. Otherwise, full gc will be called again and again until a flush is finished. -- This message was sent by Atlassian JIRA (v6.1#6144)
[jira] [Created] (CASSANDRA-6229) transfers never occur after enabling a shuffle
Daniel Meyer created CASSANDRA-6229: --- Summary: transfers never occur after enabling a shuffle Key: CASSANDRA-6229 URL: https://issues.apache.org/jira/browse/CASSANDRA-6229 Project: Cassandra Issue Type: Bug Components: Tools Reporter: Daniel Meyer Using the following documentation as reference: http://www.datastax.com/documentation/cassandra/2.0/webhelp/cassandra/tools/toolsCassandraShuffle.html After running the 'shuffle en' step, the command does not block, giving the indication that a transfer has occurred. However, running 'shuffle ls' at an arbitrary time after enabling the shuffle results in a large list of pending transfers. From the user's perspective it is unclear what is happening; however, it looks like the transfer does not actually occur. repro steps: 1) cd to latest cassandra-2.0 branch 2) ccm create shuffle_test 3) ccm populate -n 3 4) delete the 'num_tokens' line from each cassandra.yaml file for all three nodes. 5) ccm start 6) run 'ccm node1 ring' and verify the vnodes are not enabled. 7) ccm node1 stress -o insert 8) for each node set 'num_tokens: 256' in the cassandra.yaml 9) restart the nodes 10) run 'ccm node1 ring' to verify vnodes is enabled 11) ccm node1 shuffle create 12) ccm node1 shuffle en 13) wait arbitrary amount of time and run 'ccm node1 shuffle ls'. Expected: transfers should eventually happen and not be observed with 'shuffle ls' Actual: transfers never seem to occur. If in fact they do occur it is not obvious. If transfers do in fact occur it is very difficult to tell. This was initially discovered on a real cluster and the cluster sat overnight without any transfers happening. As a user I would also expect 'shuffle en' to block. The non-blocking behavior does not seem ideal from a user perspective. -- This message was sent by Atlassian JIRA (v6.1#6144)
[jira] [Assigned] (CASSANDRA-6228) Add view trace session to cqlsh
[ https://issues.apache.org/jira/browse/CASSANDRA-6228?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Jonathan Ellis reassigned CASSANDRA-6228: - Assignee: Mikhail Stepura (was: Mikhail Mazursky) Add view trace session to cqlsh - Key: CASSANDRA-6228 URL: https://issues.apache.org/jira/browse/CASSANDRA-6228 Project: Cassandra Issue Type: Improvement Components: Tools Reporter: Jeremiah Jordan Assignee: Mikhail Stepura Priority: Trivial It would be nice if cqlsh had a command to pass a tracing session id in, and have it print out the trace the same way it does when tracing is on. -- This message was sent by Atlassian JIRA (v6.1#6144)
[jira] [Updated] (CASSANDRA-6228) Add view trace session to cqlsh
[ https://issues.apache.org/jira/browse/CASSANDRA-6228?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Aleksey Yeschenko updated CASSANDRA-6228: - Reviewer: Aleksey Yeschenko Add view trace session to cqlsh - Key: CASSANDRA-6228 URL: https://issues.apache.org/jira/browse/CASSANDRA-6228 Project: Cassandra Issue Type: Improvement Components: Tools Reporter: Jeremiah Jordan Assignee: Mikhail Stepura Priority: Trivial It would be nice if cqlsh had a command to pass a tracing session id in, and have it print out the trace the same way it does when tracing is on. -- This message was sent by Atlassian JIRA (v6.1#6144)
[jira] [Commented] (CASSANDRA-3376) Track buffer cache hit rate with mincore
[ https://issues.apache.org/jira/browse/CASSANDRA-3376?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanelfocusedCommentId=13801934#comment-13801934 ] Jonathan Ellis commented on CASSANDRA-3376: --- bq. we don't know how much of an sstable we'll need to read for a given slice ahead of time Thinking about it more, just checking the position where we start the slice would probably be Good Enough (certainly better than nothing). Track buffer cache hit rate with mincore Key: CASSANDRA-3376 URL: https://issues.apache.org/jira/browse/CASSANDRA-3376 Project: Cassandra Issue Type: New Feature Reporter: Jonathan Ellis Priority: Minor Labels: ponies We could use mincore on, say, 1% of reads to be able to report actual buffer cache hit rate. This would be useful when troubleshooting slow reads, e.g., are reads to CF X slow because it's hitting disk, or because there are a lot of tombstones in the row? -- This message was sent by Atlassian JIRA (v6.1#6144)
[jira] [Commented] (CASSANDRA-6179) Load calculated in nodetool info is strange/inaccurate in JBOD setups
[ https://issues.apache.org/jira/browse/CASSANDRA-6179?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanelfocusedCommentId=13801936#comment-13801936 ] Jonathan Ellis commented on CASSANDRA-6179: --- Boo. What about adding to cfstats then? Load calculated in nodetool info is strange/inaccurate in JBOD setups - Key: CASSANDRA-6179 URL: https://issues.apache.org/jira/browse/CASSANDRA-6179 Project: Cassandra Issue Type: Bug Environment: JBOD layouts Reporter: J. Ryan Earl Attachments: dump.txt We recently noticed that the storage capacity on Cassandra nodes using JBOD layout was returning what looks close to the average data volume size, instead of the sum of all JBOD data volumes. It's not exactly an average and I haven't had time to dig into the code to see what it's really doing, it's like some sort of sample of the JBOD volumes sizes. So looking at the JBOD volumes we see: {noformat} [jre@cassandra2 ~]$ df -h FilesystemSize Used Avail Use% Mounted on [...] /dev/sdc1 1.1T 9.4G 1.1T 1% /data/1 /dev/sdd1 1.1T 9.2G 1.1T 1% /data/2 /dev/sde1 1.1T 11G 1.1T 1% /data/3 /dev/sdf1 1.1T 11G 1.1T 1% /data/4 /dev/sdg1 1.1T 9.2G 1.1T 1% /data/5 /dev/sdh1 1.1T 11G 1.1T 1% /data/6 /dev/sdi1 1.1T 9.8G 1.1T 1% /data/7 {noformat} Looking at 'nodetool info' we see: {noformat} [jre@cassandra2 ~]$ nodetool info Token: (invoke with -T/--tokens to see all 256 tokens) ID : 631f0be3-ce52-4eb9-b48b-069fbfdf0a97 Gossip active: true Thrift active: true Native Transport active: true Load : 10.57 GB {noformat} So there are 7 disks in a JBOD configuration in this example, the sum should be closer to 70G for each node. Maybe we're misinterpreting what this value should be, but things like OpsCenter appear to use this load value as the size of data on the local node, which I expect to be the sum of JBOD volumes. -- This message was sent by Atlassian JIRA (v6.1#6144)
[jira] [Commented] (CASSANDRA-6179) Load calculated in nodetool info is strange/inaccurate in JBOD setups
[ https://issues.apache.org/jira/browse/CASSANDRA-6179?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanelfocusedCommentId=13801921#comment-13801921 ] Brandon Williams commented on CASSANDRA-6179: - I think we'd have to gossip that separately, similar to how we gossip LOAD now. Load calculated in nodetool info is strange/inaccurate in JBOD setups - Key: CASSANDRA-6179 URL: https://issues.apache.org/jira/browse/CASSANDRA-6179 Project: Cassandra Issue Type: Bug Environment: JBOD layouts Reporter: J. Ryan Earl Attachments: dump.txt We recently noticed that the storage capacity on Cassandra nodes using JBOD layout was returning what looks close to the average data volume size, instead of the sum of all JBOD data volumes. It's not exactly an average and I haven't had time to dig into the code to see what it's really doing, it's like some sort of sample of the JBOD volumes sizes. So looking at the JBOD volumes we see: {noformat} [jre@cassandra2 ~]$ df -h FilesystemSize Used Avail Use% Mounted on [...] /dev/sdc1 1.1T 9.4G 1.1T 1% /data/1 /dev/sdd1 1.1T 9.2G 1.1T 1% /data/2 /dev/sde1 1.1T 11G 1.1T 1% /data/3 /dev/sdf1 1.1T 11G 1.1T 1% /data/4 /dev/sdg1 1.1T 9.2G 1.1T 1% /data/5 /dev/sdh1 1.1T 11G 1.1T 1% /data/6 /dev/sdi1 1.1T 9.8G 1.1T 1% /data/7 {noformat} Looking at 'nodetool info' we see: {noformat} [jre@cassandra2 ~]$ nodetool info Token: (invoke with -T/--tokens to see all 256 tokens) ID : 631f0be3-ce52-4eb9-b48b-069fbfdf0a97 Gossip active: true Thrift active: true Native Transport active: true Load : 10.57 GB {noformat} So there are 7 disks in a JBOD configuration in this example, the sum should be closer to 70G for each node. Maybe we're misinterpreting what this value should be, but things like OpsCenter appear to use this load value as the size of data on the local node, which I expect to be the sum of JBOD volumes. -- This message was sent by Atlassian JIRA (v6.1#6144)
[2/2] git commit: License and versions for 2.0.2 release
License and versions for 2.0.2 release Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/b6147c1c Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/b6147c1c Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/b6147c1c Branch: refs/heads/cassandra-2.0 Commit: b6147c1c7bfca817429f1e13d4dd9096bee5e626 Parents: 7159ec1 Author: Sylvain Lebresne sylv...@datastax.com Authored: Tue Oct 22 18:01:51 2013 +0200 Committer: Sylvain Lebresne sylv...@datastax.com Committed: Tue Oct 22 18:01:51 2013 +0200 -- .rat-excludes | 1 + build.xml | 2 +- debian/changelog| 6 ++ examples/triggers/build.xml | 19 +++ examples/triggers/conf/InvertedIndex.properties | 19 ++- .../filter/TombstoneOverwhelmingException.java | 18 ++ .../cassandra/metrics/RestorableMeter.java | 20 +++- .../apache/cassandra/net/WriteCallbackInfo.java | 18 ++ .../cassandra/pig/CqlTableDataTypeTest.java | 18 ++ .../pig/ThriftColumnFamilyDataTypeTest.java | 18 ++ 10 files changed, 136 insertions(+), 3 deletions(-) -- http://git-wip-us.apache.org/repos/asf/cassandra/blob/b6147c1c/.rat-excludes -- diff --git a/.rat-excludes b/.rat-excludes index 8d2ead8..78473b9 100644 --- a/.rat-excludes +++ b/.rat-excludes @@ -15,6 +15,7 @@ lib/licenses/*.txt .settings/** **/cassandra.yaml **/cassandra-topology.yaml +**/metrics-reporter-config-sample.yaml **/*.db .externalToolBuilders/** test/data/serialization/*/* http://git-wip-us.apache.org/repos/asf/cassandra/blob/b6147c1c/build.xml -- diff --git a/build.xml b/build.xml index 2703edf..09a0213 100644 --- a/build.xml +++ b/build.xml @@ -25,7 +25,7 @@ property name=debuglevel value=source,lines,vars/ !-- default version and SCM information -- -property name=base.version value=2.0.1/ +property name=base.version value=2.0.2/ property name=scm.connection value=scm:git://git.apache.org/cassandra.git/ property name=scm.developerConnection value=scm:git://git.apache.org/cassandra.git/ property 
name=scm.url value=http://git-wip-us.apache.org/repos/asf?p=cassandra.git;a=tree/ http://git-wip-us.apache.org/repos/asf/cassandra/blob/b6147c1c/debian/changelog -- diff --git a/debian/changelog b/debian/changelog index 61a91d7..4d99120 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +cassandra (2.0.2) unstable; urgency=low + + * New release + + -- Sylvain Lebresne slebre...@apache.org Tue, 22 Oct 2013 17:44:30 +0200 + cassandra (2.0.1) unstable; urgency=low * New release http://git-wip-us.apache.org/repos/asf/cassandra/blob/b6147c1c/examples/triggers/build.xml -- diff --git a/examples/triggers/build.xml b/examples/triggers/build.xml index 55fccce..293b08d 100644 --- a/examples/triggers/build.xml +++ b/examples/triggers/build.xml @@ -1,4 +1,23 @@ ?xml version=1.0 encoding=UTF-8? +!-- + ~ Licensed to the Apache Software Foundation (ASF) under one + ~ or more contributor license agreements. See the NOTICE file + ~ distributed with this work for additional information + ~ regarding copyright ownership. The ASF licenses this file + ~ to you under the Apache License, Version 2.0 (the + ~ License); you may not use this file except in compliance + ~ with the License. You may obtain a copy of the License at + ~ + ~ http://www.apache.org/licenses/LICENSE-2.0 + ~ + ~ Unless required by applicable law or agreed to in writing, + ~ software distributed under the License is distributed on an + ~ AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + ~ KIND, either express or implied. See the License for the + ~ specific language governing permissions and limitations + ~ under the License. +-- + project default=jar name=trigger-example property name=cassandra.dir value=../.. 
/ property name=cassandra.dir.lib value=${cassandra.dir}/lib / http://git-wip-us.apache.org/repos/asf/cassandra/blob/b6147c1c/examples/triggers/conf/InvertedIndex.properties -- diff --git a/examples/triggers/conf/InvertedIndex.properties b/examples/triggers/conf/InvertedIndex.properties index 6db6d61..3ec2bb0 100644 --- a/examples/triggers/conf/InvertedIndex.properties +++ b/examples/triggers/conf/InvertedIndex.properties @@ -1,2 +1,19 @@ +# Licensed to the
[1/2] git commit: Fix SSTableLoader
Updated Branches: refs/heads/cassandra-2.0 66fe5bdaa - b6147c1c7 Fix SSTableLoader patch by thobbs; reviewed by slebresne for CASSANDRA-6205 Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/7159ec10 Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/7159ec10 Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/7159ec10 Branch: refs/heads/cassandra-2.0 Commit: 7159ec10046fd4dce58ca9406ef096fdb965700d Parents: 66fe5bd Author: Sylvain Lebresne sylv...@datastax.com Authored: Tue Oct 22 17:42:50 2013 +0200 Committer: Sylvain Lebresne sylv...@datastax.com Committed: Tue Oct 22 17:42:50 2013 +0200 -- CHANGES.txt | 1 + .../org/apache/cassandra/io/sstable/SSTableReader.java| 10 -- 2 files changed, 5 insertions(+), 6 deletions(-) -- http://git-wip-us.apache.org/repos/asf/cassandra/blob/7159ec10/CHANGES.txt -- diff --git a/CHANGES.txt b/CHANGES.txt index e89ca41..ac4c010 100644 --- a/CHANGES.txt +++ b/CHANGES.txt @@ -26,6 +26,7 @@ * CQL3: support pre-epoch longs for TimestampType (CASSANDRA-6212) * Add reloadtriggers command to nodetool (CASSANDRA-4949) * cqlsh: ignore empty 'value alias' in DESCRIBE (CASSANDRA-6139) + * Fix sstable loader (CASSANDRA-6205) Merged from 1.2: * (Hadoop) Require CFRR batchSize to be at least 2 (CASSANDRA-6114) * Add a warning for small LCS sstable size (CASSANDRA-6191) http://git-wip-us.apache.org/repos/asf/cassandra/blob/7159ec10/src/java/org/apache/cassandra/io/sstable/SSTableReader.java -- diff --git a/src/java/org/apache/cassandra/io/sstable/SSTableReader.java b/src/java/org/apache/cassandra/io/sstable/SSTableReader.java index cb0873d..9837f4c 100644 --- a/src/java/org/apache/cassandra/io/sstable/SSTableReader.java +++ b/src/java/org/apache/cassandra/io/sstable/SSTableReader.java @@ -36,10 +36,7 @@ import org.slf4j.LoggerFactory; import org.apache.cassandra.cache.InstrumentingCache; import org.apache.cassandra.cache.KeyCacheKey; import 
org.apache.cassandra.concurrent.DebuggableThreadPoolExecutor; -import org.apache.cassandra.config.CFMetaData; -import org.apache.cassandra.config.ColumnDefinition; -import org.apache.cassandra.config.DatabaseDescriptor; -import org.apache.cassandra.config.Schema; +import org.apache.cassandra.config.*; import org.apache.cassandra.db.*; import org.apache.cassandra.db.columniterator.OnDiskAtomIterator; import org.apache.cassandra.db.commitlog.ReplayPosition; @@ -327,8 +324,9 @@ public class SSTableReader extends SSTable implements Closeable deletingTask = new SSTableDeletingTask(this); -// Don't track read rates for tables in the system keyspace -if (Keyspace.SYSTEM_KS.equals(desc.ksname)) +// Don't track read rates for tables in the system keyspace and don't bother trying to load or persist +// the read meter when in client mode +if (Keyspace.SYSTEM_KS.equals(desc.ksname) || Config.isClientMode()) { readMeter = null; return;
Git Push Summary
Updated Tags: refs/tags/2.0.2-tentative [created] b6147c1c7
[jira] [Commented] (CASSANDRA-6179) Load calculated in nodetool info is strange/inaccurate in JBOD setups
[ https://issues.apache.org/jira/browse/CASSANDRA-6179?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanelfocusedCommentId=13801969#comment-13801969 ] Brandon Williams commented on CASSANDRA-6179: - bq. Boo. I didn't say it was impossible, just a bit involved :) bq. What about adding to cfstats then? Seems reasonable. Load calculated in nodetool info is strange/inaccurate in JBOD setups - Key: CASSANDRA-6179 URL: https://issues.apache.org/jira/browse/CASSANDRA-6179 Project: Cassandra Issue Type: Bug Environment: JBOD layouts Reporter: J. Ryan Earl Attachments: dump.txt We recently noticed that the storage capacity on Cassandra nodes using JBOD layout was returning what looks close to the average data volume size, instead of the sum of all JBOD data volumes. It's not exactly an average and I haven't had time to dig into the code to see what it's really doing, it's like some sort of sample of the JBOD volumes sizes. So looking at the JBOD volumes we see: {noformat} [jre@cassandra2 ~]$ df -h FilesystemSize Used Avail Use% Mounted on [...] /dev/sdc1 1.1T 9.4G 1.1T 1% /data/1 /dev/sdd1 1.1T 9.2G 1.1T 1% /data/2 /dev/sde1 1.1T 11G 1.1T 1% /data/3 /dev/sdf1 1.1T 11G 1.1T 1% /data/4 /dev/sdg1 1.1T 9.2G 1.1T 1% /data/5 /dev/sdh1 1.1T 11G 1.1T 1% /data/6 /dev/sdi1 1.1T 9.8G 1.1T 1% /data/7 {noformat} Looking at 'nodetool info' we see: {noformat} [jre@cassandra2 ~]$ nodetool info Token: (invoke with -T/--tokens to see all 256 tokens) ID : 631f0be3-ce52-4eb9-b48b-069fbfdf0a97 Gossip active: true Thrift active: true Native Transport active: true Load : 10.57 GB {noformat} So there are 7 disks in a JBOD configuration in this example, the sum should be closer to 70G for each node. Maybe we're misinterpreting what this value should be, but things like OpsCenter appear to use this load value as the size of data on the local node, which I expect to be the sum of JBOD volumes. -- This message was sent by Atlassian JIRA (v6.1#6144)
[Cassandra Wiki] Update of ContributorsGroup by BrandonWilliams
Dear Wiki user, You have subscribed to a wiki page or wiki category on Cassandra Wiki for change notification. The ContributorsGroup page has been changed by BrandonWilliams: https://wiki.apache.org/cassandra/ContributorsGroup?action=diffrev1=17rev2=18 * yukim * zznate * mkjellman + * ono_matope
[jira] [Commented] (CASSANDRA-6142) Remove multithreaded compaction
[ https://issues.apache.org/jira/browse/CASSANDRA-6142?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanelfocusedCommentId=13801998#comment-13801998 ] Jonathan Ellis commented on CASSANDRA-6142: --- I'm guessing not super common because the existing code will just break if it hits that case. (A LCR object will throw errors if you try to use it after advancing the underlying stream to another row.) I guess the next step is probably for me to pull the fixes out for application to 2.0. Remove multithreaded compaction --- Key: CASSANDRA-6142 URL: https://issues.apache.org/jira/browse/CASSANDRA-6142 Project: Cassandra Issue Type: Bug Components: Core Reporter: Jonathan Ellis Assignee: Jonathan Ellis Priority: Minor Fix For: 2.1 There is at best a very small sweet spot for multithreaded compaction (ParallelCompactionIterable). For large rows, we stall the pipeline and fall back to a single LCR pass. For small rows, the overhead of the coordination outweighs the benefits of parallelization (45s to compact 2x1M stress rows with multithreading enabled, vs 35 with it disabled). -- This message was sent by Atlassian JIRA (v6.1#6144)
[jira] [Commented] (CASSANDRA-6194) speculative retry can sometimes violate consistency
[ https://issues.apache.org/jira/browse/CASSANDRA-6194?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanelfocusedCommentId=13802020#comment-13802020 ] Sylvain Lebresne commented on CASSANDRA-6194: - For the record, the error returned by the dtest is: {noformat} == FAIL: short_read_reversed_test (consistency_test.TestConsistency) -- Traceback (most recent call last): File /home/mcmanus/Git/dtest-cassandra/consistency_test.py, line 309, in short_read_reversed_test assert res[i][1] == 'value%d' % (5-i), 'Expecting value%d, got %s (%s)' % (5-i, res[i][1], str(res)) AssertionError: Expecting value5, got value8 ([[u'c08', u'value8'], [u'c05', u'value5'], [u'c04', u'value4']]) -- Ran 1 test in 130.035s {noformat} speculative retry can sometimes violate consistency --- Key: CASSANDRA-6194 URL: https://issues.apache.org/jira/browse/CASSANDRA-6194 Project: Cassandra Issue Type: Bug Components: Core Reporter: Brandon Williams Assignee: Jonathan Ellis Fix For: 2.0.2 Attachments: 6194.txt This is most evident with intermittent failures of the short_read dtests. I'll focus on short_read_reversed_test for explanation, since that's what I used to bisect. This test inserts some columns into a row, then deletes a subset, but it performs each delete on a different node, with another node down (hints are disabled.) Finally it reads the row back at QUORUM and checks that it doesn't see any deleted columns, however with speculative retry on this often fails. I bisected this to the change that made 99th percentile SR the default reliably by looping the test enough times at each iteration to be sure it was passing or failing. -- This message was sent by Atlassian JIRA (v6.1#6144)
[jira] [Updated] (CASSANDRA-6199) Improve Stress Tool
[ https://issues.apache.org/jira/browse/CASSANDRA-6199?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Benedict updated CASSANDRA-6199: Description: The stress tool could do with sprucing up. The following is a list of essential improvements and things that would be nice to have. Essential: - Reduce variability of results, especially start/end tails. Do not trash first/last 10% of readings - Reduce contention/overhead in stress to increase overall throughput - Short warm-up period, which is ignored for summary (or summarised separately), though prints progress as usual. Potentially automatic detection of rate levelling. - Better configurability and defaults for data generation - current column generation populates columns with the same value for every row, which is very easily compressible. Possibly introduce partial random data generator (possibly dictionary-based random data generator) Nice to have: - Calculate and print stdev and mean - Add batched sequential access mode (where a single thread performs batch-size sequential requests before selecting another random key) to test how key proximity affects performance - Auto-mode which attempts to establish the maximum throughput rate, by varying the thread count (or otherwise gating the number of parallel requests) for some period, then configures rate limit or thread count to test performance at e.g. 30%, 50%, 70%, 90%, 120%, 150% and unconstrained. - Auto-mode could have a target variance ratio for mean throughput and/or latency, and completes a test once this target is hit for x intervals - Fix key representation so independent of number of keys (possibly switch to 10 digit hex), and don't use String.format().getBytes() to construct it (expensive) Also, remove the skip-key setting, as it is currently ignored. Unless somebody knows the reason for it. 
- Fix latency stats - Read/write mode, with configurable recency-of-reads distribution - Add new exponential/extreme value distribution for value size, column count and recency-of-reads - Support more than 2^31 keys - Supports multiple concurrent stress inserts via key-offset parameter or similar was: The stress tool could do with sprucing up. The following is a list of essential improvements and things that would be nice to have. Essential: - Reduce variability of results, especially start/end tails. Do not trash first/last 10% of readings - Reduce contention/overhead in stress to increase overall throughput - Short warm-up period, which is ignored for summary (or summarised separately), though prints progress as usual. Potentially automatic detection of rate levelling. - Better configurability and defaults for data generation - current column generation populates columns with the same value for every row, which is very easily compressible. Nice to have: - Calculate and print stdev and mean - Add batched sequential access mode (where a single thread performs batch-size sequential requests before selecting another random key) to test how key proximity affects performance - Auto-mode which attempts to establish the maximum throughput rate, by varying the thread count (or otherwise gating the number of parallel requests) for some period, then configures rate limit or thread count to test performance at e.g. 30%, 50%, 70%, 90%, 120%, 150% and unconstrained. - Auto-mode could have a target variance ratio for mean throughput and/or latency, and completes a test once this target is hit for x intervals - Fix key representation so independent of number of keys (possibly switch to 10 digit hex), and don't use String.format().getBytes() to construct it (expensive) Also, remove the skip-key setting, as it is currently ignored. Unless somebody knows the reason for it. 
Improve Stress Tool --- Key: CASSANDRA-6199 URL: https://issues.apache.org/jira/browse/CASSANDRA-6199 Project: Cassandra Issue Type: Improvement Components: Tools Reporter: Benedict Assignee: Benedict Priority: Minor The stress tool could do with sprucing up. The following is a list of essential improvements and things that would be nice to have. Essential: - Reduce variability of results, especially start/end tails. Do not trash first/last 10% of readings - Reduce contention/overhead in stress to increase overall throughput - Short warm-up period, which is ignored for summary (or summarised separately), though prints progress as usual. Potentially automatic detection of rate levelling. - Better configurability and defaults for data generation - current column generation populates columns with the same value for every row, which is very easily compressible. Possibly introduce partial random data generator (possibly dictionary-based random data generator) Nice to have: - Calculate and print stdev and mean
[jira] [Commented] (CASSANDRA-6134) More efficient BatchlogManager
[ https://issues.apache.org/jira/browse/CASSANDRA-6134?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanelfocusedCommentId=13802135#comment-13802135 ] Oleg Anastasyev commented on CASSANDRA-6134: bq. This isn't what we want to ensure though. The current timeout (write timeout * 2) is there to account for maximum batchlog write timeout + actual data write timeout. Avoiding extra mutations is IMO more important than having less delay in the failure scenario (and slow writes would happen more often than outright failures). As we discussed earlier, whole batchlog thing makes little sense, if clients cannot read their own writes. Consider client written to batchlog very fast and timed out from coordinator having batch half applied. Reading from another coordinator it would see batch partially applied for almost yet another write timeout. So just having write timeout*2 is not a good idea. From the other hand, hammering is one-by-one replay of unplayed mutation. Dont think this could be an issue practically. +1 having RateLimiter there, so hammering could be more limited. bq. -1 on using writeTime for TTL calculation from the UUID (the time can actually jump, but uuids will always increase, and it's not what we want for TTL calc) Do you mean time jumping, if operator forcibly changes time on machine or some other scenario ? bq. making the table COMPACT STORAGE limits our flexibility wrt future batchlog schema changes, so -1 on that Using it without COMPACT STORAGE will add 2x to memory and disk. Does supporting change really neccessary ? I did not noticed any changes to original structure since very beginning of batchlog. bq. We should avoid any potentially brittle/breaking extra migration code on the already slow-ish startup. Um, i did not thinking about migrating old batchlog records on startup. This cannot be done, because old version nodes will continue to write old format batchlog entries while operator roll upgrades cluster. 
What i was thinking is having BatchlogManagerOld reading from old batchlog CF and replaying batches old way; And having BatchlogManager, reading from new batchlog2 CF and replaying batchlogs new way. As soon as all nodes are upgraded they start to write ti new batchlog2 CF, so BatchlogManagerOld after it precessed all old records reads nothing from old batchlog CF, and basically does a NOP cycle every 60 secs. So the migration is not so big deal to aim at not changing structure of batch log so badly. More efficient BatchlogManager -- Key: CASSANDRA-6134 URL: https://issues.apache.org/jira/browse/CASSANDRA-6134 Project: Cassandra Issue Type: Improvement Reporter: Oleg Anastasyev Priority: Minor Attachments: BatchlogManager.txt As we discussed earlier in CASSANDRA-6079 this is the new BatchManager. It stores batch records in {code} CREATE TABLE batchlog ( id_partition int, id timeuuid, data blob, PRIMARY KEY (id_partition, id) ) WITH COMPACT STORAGE AND CLUSTERING ORDER BY (id DESC) {code} where id_partition is minute-since-epoch of id uuid. So when it scans for batches to replay ot scans within a single partition for a slice of ids since last processed date till now minus write timeout. So no full batchlog CF scan and lot of randrom reads are made on normal cycle. Other improvements: 1. It runs every 1/2 of write timeout and replays all batches written within 0.9 * write timeout from now. This way we ensure, that batched updates will be replayed to th moment client times out from coordinator. 2. It submits all mutations from single batch in parallel (Like StorageProxy do). Old implementation played them one-by-one, so client can see half applied batches in CF for a long time (depending on size of batch). 3. It fixes a subtle racing bug with incorrect hint ttl calculation -- This message was sent by Atlassian JIRA (v6.1#6144)
[jira] [Updated] (CASSANDRA-4809) Allow restoring specific column families from archived commitlog
[ https://issues.apache.org/jira/browse/CASSANDRA-4809?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Lyuben Todorov updated CASSANDRA-4809: -- Attachment: 4809_v4.patch Switched to using a collection of cfs to be replayed where if no specific ks/cf(s) are supplied everything is replayed. Added a .trim() to the ks and cf params that can be supplied. Param is still expected to be a comma separated list (so we can allow multiple keyspaces and multiple cfs) {code} ./cassandra -Dcassandra.replayList=system.schema_columnfamilies,system.schema_keyspaces,ks1.cf1 {code} Allow restoring specific column families from archived commitlog Key: CASSANDRA-4809 URL: https://issues.apache.org/jira/browse/CASSANDRA-4809 Project: Cassandra Issue Type: Improvement Affects Versions: 1.2.0 Reporter: Nick Bailey Assignee: Lyuben Todorov Labels: lhf Fix For: 2.0.2 Attachments: 4809.patch, 4809_v2.patch, 4809_v3.patch, 4809_v4.patch Currently you can only restore the entire contents of a commit log archive. It would be useful to specify the keyspaces/column families you want to restore from an archived commitlog. -- This message was sent by Atlassian JIRA (v6.1#6144)
[jira] [Commented] (CASSANDRA-6109) Consider coldness in STCS compaction
[ https://issues.apache.org/jira/browse/CASSANDRA-6109?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanelfocusedCommentId=13802167#comment-13802167 ] Tyler Hobbs commented on CASSANDRA-6109: bq. What if we just added a bucket filter that said, SSTables representing less than X% of the reads will not be bucketed? To be clear, you're suggesting ignoring *buckets* whose reads make up less than X% of the total reads/sec for the table, correct? bq. Straightforward to tune and I can't think of any really pathological cases, other than where size-tiering just doesn't put hot overlapping sstables in the same bucket. This is definitely easier to tune. One case I'm concerned about is where the max compaction threshold prevents a bucket from ever being above X% of the total reads/sec, especially with new, small SSTables. If we compare reads *per key* per second instead of just reads/sec, that case goes away. Additionally, while comparing reads/sec would focus compactions on the largest SSTables, comparing reads per key per second would focus on compacting the hottest SSTables, which is an improvement. With that change, I really like this strategy. As far as the default threshold goes, I'll suggest a conservative 2 to 5%. Here's my thought process: there are usually roughly 5 tiers, so each tier should get about 20% of the total reads per key per second if all SSTables were equally hot. Cold sstables should have below 10 to 25% of the normal read rates, giving a 2 to 5% threshold. Consider coldness in STCS compaction Key: CASSANDRA-6109 URL: https://issues.apache.org/jira/browse/CASSANDRA-6109 Project: Cassandra Issue Type: New Feature Components: Core Reporter: Jonathan Ellis Assignee: Tyler Hobbs Fix For: 2.0.2 Attachments: 6109-v1.patch, 6109-v2.patch I see two options: # Don't compact cold sstables at all # Compact cold sstables only if there is nothing more important to compact The latter is better if you have cold data that may become hot again... 
but it's confusing if you have a workload such that you can't keep up with *all* compaction, but you can keep up with hot sstables. (Compaction backlog stat becomes useless since we fall increasingly behind.) -- This message was sent by Atlassian JIRA (v6.1#6144)
[jira] [Comment Edited] (CASSANDRA-6109) Consider coldness in STCS compaction
[ https://issues.apache.org/jira/browse/CASSANDRA-6109?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanelfocusedCommentId=13802167#comment-13802167 ] Tyler Hobbs edited comment on CASSANDRA-6109 at 10/22/13 7:33 PM: -- bq. What if we just added a bucket filter that said, SSTables representing less than X% of the reads will not be bucketed? To be clear, you're suggesting ignoring _buckets_ whose reads make up less than X% of the total reads/sec for the table, correct? bq. Straightforward to tune and I can't think of any really pathological cases, other than where size-tiering just doesn't put hot overlapping sstables in the same bucket. This is definitely easier to tune. One case I'm concerned about is where the max compaction threshold prevents a bucket from ever being above X% of the total reads/sec, especially with new, small SSTables. If we compare reads _per key_ per second instead of just reads/sec, that case goes away. Additionally, while comparing reads/sec would focus compactions on the largest SSTables, comparing reads per key per second would focus on compacting the hottest SSTables, which is an improvement. With that change, I really like this strategy. As far as the default threshold goes, I'll suggest a conservative 2 to 5%. Here's my thought process: there are usually roughly 5 tiers, so each tier should get about 20% of the total reads per key per second if all SSTables were equally hot. Cold sstables should have below 10 to 25% of the normal read rates, giving a 2 to 5% threshold. was (Author: thobbs): bq. What if we just added a bucket filter that said, SSTables representing less than X% of the reads will not be bucketed? To be clear, you're suggesting ignoring *buckets* whose reads make up less than X% of the total reads/sec for the table, correct? bq. Straightforward to tune and I can't think of any really pathological cases, other than where size-tiering just doesn't put hot overlapping sstables in the same bucket. 
This is definitely easier to tune. One case I'm concerned about is where the max compaction threshold prevents a bucket from ever being above X% of the total reads/sec, especially with new, small SSTables. If we compare reads *per key* per second instead of just reads/sec, that case goes away. Additionally, while comparing reads/sec would focus compactions on the largest SSTables, comparing reads per key per second would focus on compacting the hottest SSTables, which is an improvement. With that change, I really like this strategy. As far as the default threshold goes, I'll suggest a conservative 2 to 5%. Here's my thought process: there are usually roughly 5 tiers, so each tier should get about 20% of the total reads per key per second if all SSTables were equally hot. Cold sstables should have below 10 to 25% of the normal read rates, giving a 2 to 5% threshold. Consider coldness in STCS compaction Key: CASSANDRA-6109 URL: https://issues.apache.org/jira/browse/CASSANDRA-6109 Project: Cassandra Issue Type: New Feature Components: Core Reporter: Jonathan Ellis Assignee: Tyler Hobbs Fix For: 2.0.2 Attachments: 6109-v1.patch, 6109-v2.patch I see two options: # Don't compact cold sstables at all # Compact cold sstables only if there is nothing more important to compact The latter is better if you have cold data that may become hot again... but it's confusing if you have a workload such that you can't keep up with *all* compaction, but you can keep up with hot sstable. (Compaction backlog stat becomes useless since we fall increasingly behind.) -- This message was sent by Atlassian JIRA (v6.1#6144)
[jira] [Commented] (CASSANDRA-6109) Consider coldness in STCS compaction
[ https://issues.apache.org/jira/browse/CASSANDRA-6109?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanelfocusedCommentId=13802176#comment-13802176 ] Jonathan Ellis commented on CASSANDRA-6109: --- bq. you're suggesting ignoring buckets? No, I'm suggesting instead of {{getBuckets(sstables)}}, {{getBuckets(sstable for sstable in sstables if recents_reads_from(sstable) X)}} Consider coldness in STCS compaction Key: CASSANDRA-6109 URL: https://issues.apache.org/jira/browse/CASSANDRA-6109 Project: Cassandra Issue Type: New Feature Components: Core Reporter: Jonathan Ellis Assignee: Tyler Hobbs Fix For: 2.0.2 Attachments: 6109-v1.patch, 6109-v2.patch I see two options: # Don't compact cold sstables at all # Compact cold sstables only if there is nothing more important to compact The latter is better if you have cold data that may become hot again... but it's confusing if you have a workload such that you can't keep up with *all* compaction, but you can keep up with hot sstable. (Compaction backlog stat becomes useless since we fall increasingly behind.) -- This message was sent by Atlassian JIRA (v6.1#6144)
[jira] [Commented] (CASSANDRA-6194) speculative retry can sometimes violate consistency
[ https://issues.apache.org/jira/browse/CASSANDRA-6194?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanelfocusedCommentId=13802182#comment-13802182 ] Brandon Williams commented on CASSANDRA-6194: - Worth noting that there's no real pattern to the columns, other than they're wrong {noformat} AssertionError: Expecting value5, got value7 ([[u'c07', u'value7'], [u'c05', u'value5'], [u'c04', u'value4']]) {noformat} speculative retry can sometimes violate consistency --- Key: CASSANDRA-6194 URL: https://issues.apache.org/jira/browse/CASSANDRA-6194 Project: Cassandra Issue Type: Bug Components: Core Reporter: Brandon Williams Assignee: Jonathan Ellis Fix For: 2.0.2 Attachments: 6194.txt This is most evident with intermittent failures of the short_read dtests. I'll focus on short_read_reversed_test for explanation, since that's what I used to bisect. This test inserts some columns into a row, then deletes a subset, but it performs each delete on a different node, with another node down (hints are disabled.) Finally it reads the row back at QUORUM and checks that it doesn't see any deleted columns, however with speculative retry on this often fails. I bisected this to the change that made 99th percentile SR the default reliably by looping the test enough times at each iteration to be sure it was passing or failing. -- This message was sent by Atlassian JIRA (v6.1#6144)
[jira] [Commented] (CASSANDRA-6109) Consider coldness in STCS compaction
[ https://issues.apache.org/jira/browse/CASSANDRA-6109?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanelfocusedCommentId=13802206#comment-13802206 ] Tyler Hobbs commented on CASSANDRA-6109: No, I'm suggesting instead of getBuckets(sstables), getBuckets(sstable for sstable in sstables if recents_reads_from(sstable) X) Ah, well that scheme has some problematic cases: * Many cold sstables that collectively make up a large percentage of reads in aggregate may be ignored (like your 10, 1, 1, 1... case above) * It's possible to have no sstables that cross the threshold when they are equally hot Consider coldness in STCS compaction Key: CASSANDRA-6109 URL: https://issues.apache.org/jira/browse/CASSANDRA-6109 Project: Cassandra Issue Type: New Feature Components: Core Reporter: Jonathan Ellis Assignee: Tyler Hobbs Fix For: 2.0.2 Attachments: 6109-v1.patch, 6109-v2.patch I see two options: # Don't compact cold sstables at all # Compact cold sstables only if there is nothing more important to compact The latter is better if you have cold data that may become hot again... but it's confusing if you have a workload such that you can't keep up with *all* compaction, but you can keep up with hot sstable. (Compaction backlog stat becomes useless since we fall increasingly behind.) -- This message was sent by Atlassian JIRA (v6.1#6144)
[jira] [Comment Edited] (CASSANDRA-6109) Consider coldness in STCS compaction
[ https://issues.apache.org/jira/browse/CASSANDRA-6109?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanelfocusedCommentId=13802206#comment-13802206 ] Tyler Hobbs edited comment on CASSANDRA-6109 at 10/22/13 8:05 PM: -- bq. No, I'm suggesting instead of getBuckets(sstables), getBuckets(sstable for sstable in sstables if recents_reads_from(sstable) X) Ah, well that scheme has some problematic cases: * Many cold sstables that collectively make up a large percentage of reads in aggregate may be ignored (like your 10, 1, 1, 1... case above) * It's possible to have no sstables that cross the threshold when they are equally hot was (Author: thobbs): No, I'm suggesting instead of getBuckets(sstables), getBuckets(sstable for sstable in sstables if recents_reads_from(sstable) X) Ah, well that scheme has some problematic cases: * Many cold sstables that collectively make up a large percentage of reads in aggregate may be ignored (like your 10, 1, 1, 1... case above) * It's possible to have no sstables that cross the threshold when they are equally hot Consider coldness in STCS compaction Key: CASSANDRA-6109 URL: https://issues.apache.org/jira/browse/CASSANDRA-6109 Project: Cassandra Issue Type: New Feature Components: Core Reporter: Jonathan Ellis Assignee: Tyler Hobbs Fix For: 2.0.2 Attachments: 6109-v1.patch, 6109-v2.patch I see two options: # Don't compact cold sstables at all # Compact cold sstables only if there is nothing more important to compact The latter is better if you have cold data that may become hot again... but it's confusing if you have a workload such that you can't keep up with *all* compaction, but you can keep up with hot sstable. (Compaction backlog stat becomes useless since we fall increasingly behind.) -- This message was sent by Atlassian JIRA (v6.1#6144)
[jira] [Commented] (CASSANDRA-6220) Unable to select multiple entries using In clause on clustering part of compound key
[ https://issues.apache.org/jira/browse/CASSANDRA-6220?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanelfocusedCommentId=13802209#comment-13802209 ] Constance Eustace commented on CASSANDRA-6220: -- My current thinking is that truncation / schema recreation disrupts synchronization of compaction with the searching datastructure/sstables, and until a full compaction completes, updates after truncation/schema recreation are iffy... Unable to select multiple entries using In clause on clustering part of compound key Key: CASSANDRA-6220 URL: https://issues.apache.org/jira/browse/CASSANDRA-6220 Project: Cassandra Issue Type: Bug Components: Core Reporter: Ashot Golovenko Attachments: inserts.zip I have the following table: CREATE TABLE rating ( id bigint, mid int, hid int, r double, PRIMARY KEY ((id, mid), hid)); And I get really really strange result sets on the following queries: cqlsh:bm SELECT hid, r FROM rating WHERE id = 755349113 and mid = 201310 and hid = 201329320; hid | r ---+ 201329320 | 45.476 (1 rows) cqlsh:bm SELECT hid, r FROM rating WHERE id = 755349113 and mid = 201310 and hid = 201329220; hid | r ---+--- 201329220 | 53.62 (1 rows) cqlsh:bm SELECT hid, r FROM rating WHERE id = 755349113 and mid = 201310 and hid in (201329320, 201329220); hid | r ---+ 201329320 | 45.476 (1 rows) -- WRONG - should be two records As you can see although both records exist I'm not able the fetch all of them using in clause. By now I have to cycle my requests which are about 30 and I find it highly inefficient given that I query physically the same row. More of that - it doesn't happen all the time! For different id values sometimes I get the correct dataset. Ideally I'd like the following select to work: SELECT hid, r FROM rating WHERE id = 755349113 and mid in ? and hid in ?; Which doesn't work either. -- This message was sent by Atlassian JIRA (v6.1#6144)
[jira] [Commented] (CASSANDRA-6194) speculative retry can sometimes violate consistency
[ https://issues.apache.org/jira/browse/CASSANDRA-6194?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanelfocusedCommentId=13802211#comment-13802211 ] Jonathan Ellis commented on CASSANDRA-6194: --- The problem is that when we speculate, we do multiple data reads, and RowDigestResolver assumes there is only one data read. (If there is more than one, it does not error out but silently drops all but one.) So if the speculative read results in triggering the callback's we have enough replies to satisfy CL logic, and the speculative data read finished before the digest, we effectively do CL.ONE logic instead of CL.QUORUM. https://github.com/jbellis/cassandra/commits/6194 includes a fix for this and also a fix for DigestMismatch logic with SR. speculative retry can sometimes violate consistency --- Key: CASSANDRA-6194 URL: https://issues.apache.org/jira/browse/CASSANDRA-6194 Project: Cassandra Issue Type: Bug Components: Core Reporter: Brandon Williams Assignee: Jonathan Ellis Fix For: 2.0.2 Attachments: 6194.txt This is most evident with intermittent failures of the short_read dtests. I'll focus on short_read_reversed_test for explanation, since that's what I used to bisect. This test inserts some columns into a row, then deletes a subset, but it performs each delete on a different node, with another node down (hints are disabled.) Finally it reads the row back at QUORUM and checks that it doesn't see any deleted columns, however with speculative retry on this often fails. I bisected this to the change that made 99th percentile SR the default reliably by looping the test enough times at each iteration to be sure it was passing or failing. -- This message was sent by Atlassian JIRA (v6.1#6144)
[jira] [Updated] (CASSANDRA-5571) Reject bootstrapping endpoints that are already in the ring with different gossip data
[ https://issues.apache.org/jira/browse/CASSANDRA-5571?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Tyler Hobbs updated CASSANDRA-5571: --- Attachment: 5571-2.0-v3.patch 5571-2.0-v3.patch tolerates dead nodes when checking for endpoint collisions on bootstrap. Reject bootstrapping endpoints that are already in the ring with different gossip data -- Key: CASSANDRA-5571 URL: https://issues.apache.org/jira/browse/CASSANDRA-5571 Project: Cassandra Issue Type: Improvement Components: Core Reporter: Rick Branson Assignee: Tyler Hobbs Attachments: 5571-2.0-v1.patch, 5571-2.0-v2.patch, 5571-2.0-v3.patch The ring can be silently broken by improperly bootstrapping an endpoint that has an existing entry in the gossip table. In the case where a node attempts to bootstrap with the same IP address as an existing ring member, the old token metadata is dropped without warning, resulting in range shifts for the cluster. This isn't so bad for non-vnode cases where, in general, tokens are explicitly assigned, and a bootstrap on the same token would result in no range shifts. For vnode cases, the convention is to just let nodes come up by selecting their own tokens, and a bootstrap will override the existing tokens for that endpoint. While there are some other issues open for adding an explicit rebootstrap feature for vnode cases, given the changes in operator habits for vnode rings, it seems a bit too easy to make this happen. Even more undesirable is the fact that it's basically silent. This is a proposal for checking for this exact case: bootstraps on endpoints with existing ring entries that have different hostIDs and/or tokens should be rejected with an error message describing what happened and how to override the safety check. It looks like the override can be supported using the existing nodetool removenode -force. I can work up a patch for this. -- This message was sent by Atlassian JIRA (v6.1#6144)
[jira] [Comment Edited] (CASSANDRA-6109) Consider coldness in STCS compaction
[ https://issues.apache.org/jira/browse/CASSANDRA-6109?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanelfocusedCommentId=13802206#comment-13802206 ] Tyler Hobbs edited comment on CASSANDRA-6109 at 10/22/13 8:12 PM: -- bq. No, I'm suggesting instead of getBuckets(sstables), getBuckets(sstable for sstable in sstables if recents_reads_from(sstable) X) Ah, well that scheme has some problematic cases: * Many cold sstables that collectively make up a large percentage of reads may be ignored (like your 10, 1, 1, 1... case above) * It's possible to have no sstables that cross the threshold when they are equally hot was (Author: thobbs): bq. No, I'm suggesting instead of getBuckets(sstables), getBuckets(sstable for sstable in sstables if recents_reads_from(sstable) X) Ah, well that scheme has some problematic cases: * Many cold sstables that collectively make up a large percentage of reads in aggregate may be ignored (like your 10, 1, 1, 1... case above) * It's possible to have no sstables that cross the threshold when they are equally hot Consider coldness in STCS compaction Key: CASSANDRA-6109 URL: https://issues.apache.org/jira/browse/CASSANDRA-6109 Project: Cassandra Issue Type: New Feature Components: Core Reporter: Jonathan Ellis Assignee: Tyler Hobbs Fix For: 2.0.2 Attachments: 6109-v1.patch, 6109-v2.patch I see two options: # Don't compact cold sstables at all # Compact cold sstables only if there is nothing more important to compact The latter is better if you have cold data that may become hot again... but it's confusing if you have a workload such that you can't keep up with *all* compaction, but you can keep up with hot sstable. (Compaction backlog stat becomes useless since we fall increasingly behind.) -- This message was sent by Atlassian JIRA (v6.1#6144)
[jira] [Updated] (CASSANDRA-5815) NPE from migration manager
[ https://issues.apache.org/jira/browse/CASSANDRA-5815?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Tyler Hobbs updated CASSANDRA-5815: --- Description: In one of our production clusters we see this error often. Looking through the source, Gossiper.instance.getEndpointStateForEndpoint(endpoint) is returning null for some end point. De we need any config change on our end to resolve this? In any case, cassandra should be updated to protect against this NPE. {noformat} ERROR [OptionalTasks:1] 2013-07-24 13:40:38,972 AbstractCassandraDaemon.java (line 132) Exception in thread Thread[OptionalTasks:1,5,main] java.lang.NullPointerException at org.apache.cassandra.service.MigrationManager$1.run(MigrationManager.java:134) at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:441) at java.util.concurrent.FutureTask$Sync.innerRun(FutureTask.java:303) at java.util.concurrent.FutureTask.run(FutureTask.java:138) at java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.access$301(ScheduledThreadPoolExecutor.java:98) at java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.run(ScheduledThreadPoolExecutor.java:206) at java.util.concurrent.ThreadPoolExecutor$Worker.runTask(ThreadPoolExecutor.java:886) at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:908) at java.lang.Thread.run(Thread.java:662) {noformat} It turned out that the reason for NPE was we bootstrapped a node with the same token as another node. Cassandra should not throw an NPE here but log a meaningful error message. was: In one of our production clusters we see this error often. Looking through the source, Gossiper.instance.getEndpointStateForEndpoint(endpoint) is returning null for some end point. De we need any config change on our end to resolve this? In any case, cassandra should be updated to protect against this NPE. 
ERROR [OptionalTasks:1] 2013-07-24 13:40:38,972 AbstractCassandraDaemon.java (line 132) Exception in thread Thread[OptionalTasks:1,5,main] java.lang.NullPointerException at org.apache.cassandra.service.MigrationManager$1.run(MigrationManager.java:134) at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:441) at java.util.concurrent.FutureTask$Sync.innerRun(FutureTask.java:303) at java.util.concurrent.FutureTask.run(FutureTask.java:138) at java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.access$301(ScheduledThreadPoolExecutor.java:98) at java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.run(ScheduledThreadPoolExecutor.java:206) at java.util.concurrent.ThreadPoolExecutor$Worker.runTask(ThreadPoolExecutor.java:886) at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:908) at java.lang.Thread.run(Thread.java:662) It turned out that the reason for NPE was we bootstrapped a node with the same token as another node. Cassandra should not throw an NPE here but log a meaningful error message. NPE from migration manager -- Key: CASSANDRA-5815 URL: https://issues.apache.org/jira/browse/CASSANDRA-5815 Project: Cassandra Issue Type: Bug Components: Core Affects Versions: 1.1.12 Reporter: Vishy Kasar Assignee: Brandon Williams Priority: Minor Fix For: 1.2.12 Attachments: 5185.txt In one of our production clusters we see this error often. Looking through the source, Gossiper.instance.getEndpointStateForEndpoint(endpoint) is returning null for some end point. Do we need any config change on our end to resolve this? In any case, cassandra should be updated to protect against this NPE.
{noformat} ERROR [OptionalTasks:1] 2013-07-24 13:40:38,972 AbstractCassandraDaemon.java (line 132) Exception in thread Thread[OptionalTasks:1,5,main] java.lang.NullPointerException at org.apache.cassandra.service.MigrationManager$1.run(MigrationManager.java:134) at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:441) at java.util.concurrent.FutureTask$Sync.innerRun(FutureTask.java:303) at java.util.concurrent.FutureTask.run(FutureTask.java:138) at java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.access$301(ScheduledThreadPoolExecutor.java:98) at java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.run(ScheduledThreadPoolExecutor.java:206) at java.util.concurrent.ThreadPoolExecutor$Worker.runTask(ThreadPoolExecutor.java:886) at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:908) at java.lang.Thread.run(Thread.java:662) {noformat} It turned out that the reason for NPE was we bootstrapped a node with the same token as another node. Cassandra should not throw an NPE here but log a meaningful error message.
[jira] [Commented] (CASSANDRA-5815) NPE from migration manager
[ https://issues.apache.org/jira/browse/CASSANDRA-5815?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanelfocusedCommentId=13802224#comment-13802224 ] Tyler Hobbs commented on CASSANDRA-5815: +1 NPE from migration manager -- Key: CASSANDRA-5815 URL: https://issues.apache.org/jira/browse/CASSANDRA-5815 Project: Cassandra Issue Type: Bug Components: Core Affects Versions: 1.1.12 Reporter: Vishy Kasar Assignee: Brandon Williams Priority: Minor Fix For: 1.2.12 Attachments: 5185.txt In one of our production clusters we see this error often. Looking through the source, Gossiper.instance.getEndpointStateForEndpoint(endpoint) is returning null for some end point. De we need any config change on our end to resolve this? In any case, cassandra should be updated to protect against this NPE. {noformat} ERROR [OptionalTasks:1] 2013-07-24 13:40:38,972 AbstractCassandraDaemon.java (line 132) Exception in thread Thread[OptionalTasks:1,5,main] java.lang.NullPointerException at org.apache.cassandra.service.MigrationManager$1.run(MigrationManager.java:134) at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:441) at java.util.concurrent.FutureTask$Sync.innerRun(FutureTask.java:303) at java.util.concurrent.FutureTask.run(FutureTask.java:138) at java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.access$301(ScheduledThreadPoolExecutor.java:98) at java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.run(ScheduledThreadPoolExecutor.java:206) at java.util.concurrent.ThreadPoolExecutor$Worker.runTask(ThreadPoolExecutor.java:886) at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:908) at java.lang.Thread.run(Thread.java:662) {noformat} It turned out that the reason for NPE was we bootstrapped a node with the same token as another node. Cassandra should not throw an NPE here but log a meaningful error message. -- This message was sent by Atlassian JIRA (v6.1#6144)
[2/9] git commit: Fix SSTableLoader
Fix SSTableLoader patch by thobbs; reviewed by slebresne for CASSANDRA-6205 Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/7159ec10 Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/7159ec10 Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/7159ec10 Branch: refs/heads/trunk Commit: 7159ec10046fd4dce58ca9406ef096fdb965700d Parents: 66fe5bd Author: Sylvain Lebresne sylv...@datastax.com Authored: Tue Oct 22 17:42:50 2013 +0200 Committer: Sylvain Lebresne sylv...@datastax.com Committed: Tue Oct 22 17:42:50 2013 +0200 -- CHANGES.txt | 1 + .../org/apache/cassandra/io/sstable/SSTableReader.java| 10 -- 2 files changed, 5 insertions(+), 6 deletions(-) -- http://git-wip-us.apache.org/repos/asf/cassandra/blob/7159ec10/CHANGES.txt -- diff --git a/CHANGES.txt b/CHANGES.txt index e89ca41..ac4c010 100644 --- a/CHANGES.txt +++ b/CHANGES.txt @@ -26,6 +26,7 @@ * CQL3: support pre-epoch longs for TimestampType (CASSANDRA-6212) * Add reloadtriggers command to nodetool (CASSANDRA-4949) * cqlsh: ignore empty 'value alias' in DESCRIBE (CASSANDRA-6139) + * Fix sstable loader (CASSANDRA-6205) Merged from 1.2: * (Hadoop) Require CFRR batchSize to be at least 2 (CASSANDRA-6114) * Add a warning for small LCS sstable size (CASSANDRA-6191) http://git-wip-us.apache.org/repos/asf/cassandra/blob/7159ec10/src/java/org/apache/cassandra/io/sstable/SSTableReader.java -- diff --git a/src/java/org/apache/cassandra/io/sstable/SSTableReader.java b/src/java/org/apache/cassandra/io/sstable/SSTableReader.java index cb0873d..9837f4c 100644 --- a/src/java/org/apache/cassandra/io/sstable/SSTableReader.java +++ b/src/java/org/apache/cassandra/io/sstable/SSTableReader.java @@ -36,10 +36,7 @@ import org.slf4j.LoggerFactory; import org.apache.cassandra.cache.InstrumentingCache; import org.apache.cassandra.cache.KeyCacheKey; import org.apache.cassandra.concurrent.DebuggableThreadPoolExecutor; -import 
org.apache.cassandra.config.CFMetaData; -import org.apache.cassandra.config.ColumnDefinition; -import org.apache.cassandra.config.DatabaseDescriptor; -import org.apache.cassandra.config.Schema; +import org.apache.cassandra.config.*; import org.apache.cassandra.db.*; import org.apache.cassandra.db.columniterator.OnDiskAtomIterator; import org.apache.cassandra.db.commitlog.ReplayPosition; @@ -327,8 +324,9 @@ public class SSTableReader extends SSTable implements Closeable deletingTask = new SSTableDeletingTask(this); -// Don't track read rates for tables in the system keyspace -if (Keyspace.SYSTEM_KS.equals(desc.ksname)) +// Don't track read rates for tables in the system keyspace and don't bother trying to load or persist +// the read meter when in client mode +if (Keyspace.SYSTEM_KS.equals(desc.ksname) || Config.isClientMode()) { readMeter = null; return;
[3/9] git commit: License and versions for 2.0.2 release
License and versions for 2.0.2 release Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/b6147c1c Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/b6147c1c Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/b6147c1c Branch: refs/heads/trunk Commit: b6147c1c7bfca817429f1e13d4dd9096bee5e626 Parents: 7159ec1 Author: Sylvain Lebresne sylv...@datastax.com Authored: Tue Oct 22 18:01:51 2013 +0200 Committer: Sylvain Lebresne sylv...@datastax.com Committed: Tue Oct 22 18:01:51 2013 +0200 -- .rat-excludes | 1 + build.xml | 2 +- debian/changelog| 6 ++ examples/triggers/build.xml | 19 +++ examples/triggers/conf/InvertedIndex.properties | 19 ++- .../filter/TombstoneOverwhelmingException.java | 18 ++ .../cassandra/metrics/RestorableMeter.java | 20 +++- .../apache/cassandra/net/WriteCallbackInfo.java | 18 ++ .../cassandra/pig/CqlTableDataTypeTest.java | 18 ++ .../pig/ThriftColumnFamilyDataTypeTest.java | 18 ++ 10 files changed, 136 insertions(+), 3 deletions(-) -- http://git-wip-us.apache.org/repos/asf/cassandra/blob/b6147c1c/.rat-excludes -- diff --git a/.rat-excludes b/.rat-excludes index 8d2ead8..78473b9 100644 --- a/.rat-excludes +++ b/.rat-excludes @@ -15,6 +15,7 @@ lib/licenses/*.txt .settings/** **/cassandra.yaml **/cassandra-topology.yaml +**/metrics-reporter-config-sample.yaml **/*.db .externalToolBuilders/** test/data/serialization/*/* http://git-wip-us.apache.org/repos/asf/cassandra/blob/b6147c1c/build.xml -- diff --git a/build.xml b/build.xml index 2703edf..09a0213 100644 --- a/build.xml +++ b/build.xml @@ -25,7 +25,7 @@ property name=debuglevel value=source,lines,vars/ !-- default version and SCM information -- -property name=base.version value=2.0.1/ +property name=base.version value=2.0.2/ property name=scm.connection value=scm:git://git.apache.org/cassandra.git/ property name=scm.developerConnection value=scm:git://git.apache.org/cassandra.git/ property name=scm.url 
value=http://git-wip-us.apache.org/repos/asf?p=cassandra.git;a=tree/ http://git-wip-us.apache.org/repos/asf/cassandra/blob/b6147c1c/debian/changelog -- diff --git a/debian/changelog b/debian/changelog index 61a91d7..4d99120 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +cassandra (2.0.2) unstable; urgency=low + + * New release + + -- Sylvain Lebresne slebre...@apache.org Tue, 22 Oct 2013 17:44:30 +0200 + cassandra (2.0.1) unstable; urgency=low * New release http://git-wip-us.apache.org/repos/asf/cassandra/blob/b6147c1c/examples/triggers/build.xml -- diff --git a/examples/triggers/build.xml b/examples/triggers/build.xml index 55fccce..293b08d 100644 --- a/examples/triggers/build.xml +++ b/examples/triggers/build.xml @@ -1,4 +1,23 @@ ?xml version=1.0 encoding=UTF-8? +!-- + ~ Licensed to the Apache Software Foundation (ASF) under one + ~ or more contributor license agreements. See the NOTICE file + ~ distributed with this work for additional information + ~ regarding copyright ownership. The ASF licenses this file + ~ to you under the Apache License, Version 2.0 (the + ~ License); you may not use this file except in compliance + ~ with the License. You may obtain a copy of the License at + ~ + ~ http://www.apache.org/licenses/LICENSE-2.0 + ~ + ~ Unless required by applicable law or agreed to in writing, + ~ software distributed under the License is distributed on an + ~ AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + ~ KIND, either express or implied. See the License for the + ~ specific language governing permissions and limitations + ~ under the License. +-- + project default=jar name=trigger-example property name=cassandra.dir value=../.. 
/ property name=cassandra.dir.lib value=${cassandra.dir}/lib / http://git-wip-us.apache.org/repos/asf/cassandra/blob/b6147c1c/examples/triggers/conf/InvertedIndex.properties -- diff --git a/examples/triggers/conf/InvertedIndex.properties b/examples/triggers/conf/InvertedIndex.properties index 6db6d61..3ec2bb0 100644 --- a/examples/triggers/conf/InvertedIndex.properties +++ b/examples/triggers/conf/InvertedIndex.properties @@ -1,2 +1,19 @@ +# Licensed to the Apache
[7/9] git commit: Merge branch 'cassandra-1.2' into cassandra-2.0
Merge branch 'cassandra-1.2' into cassandra-2.0 Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/8a069587 Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/8a069587 Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/8a069587 Branch: refs/heads/trunk Commit: 8a069587d5f67d64d99e9d391e5f6a733616ab97 Parents: b6147c1 59bf44d Author: Brandon Williams brandonwilli...@apache.org Authored: Tue Oct 22 15:29:24 2013 -0500 Committer: Brandon Williams brandonwilli...@apache.org Committed: Tue Oct 22 15:29:24 2013 -0500 -- src/java/org/apache/cassandra/db/HintedHandOffManager.java | 8 ++-- src/java/org/apache/cassandra/service/MigrationManager.java | 5 - 2 files changed, 10 insertions(+), 3 deletions(-) -- http://git-wip-us.apache.org/repos/asf/cassandra/blob/8a069587/src/java/org/apache/cassandra/db/HintedHandOffManager.java -- diff --cc src/java/org/apache/cassandra/db/HintedHandOffManager.java index 67a5c68,ede49e4..f1e751f --- a/src/java/org/apache/cassandra/db/HintedHandOffManager.java +++ b/src/java/org/apache/cassandra/db/HintedHandOffManager.java @@@ -249,22 -209,38 +249,24 @@@ public class HintedHandOffManager imple Gossiper gossiper = Gossiper.instance; int waited = 0; // first, wait for schema to be gossiped. 
- while (gossiper.getEndpointStateForEndpoint(endpoint).getApplicationState(ApplicationState.SCHEMA) == null) + while (gossiper.getEndpointStateForEndpoint(endpoint) != null gossiper.getEndpointStateForEndpoint(endpoint).getApplicationState(ApplicationState.SCHEMA) == null) { -try -{ -Thread.sleep(1000); -} -catch (InterruptedException e) -{ -throw new AssertionError(e); -} +Uninterruptibles.sleepUninterruptibly(1, TimeUnit.SECONDS); waited += 1000; if (waited 2 * StorageService.RING_DELAY) throw new TimeoutException(Didin't receive gossiped schema from + endpoint + in + 2 * StorageService.RING_DELAY + ms); } + if (gossiper.getEndpointStateForEndpoint(endpoint) == null) + throw new TimeoutException(Node + endpoint + vanished while waiting for agreement); waited = 0; // then wait for the correct schema version. -// usually we use DD.getDefsVersion, which checks the local schema uuid as stored in the system table. +// usually we use DD.getDefsVersion, which checks the local schema uuid as stored in the system keyspace. 
// here we check the one in gossip instead; this serves as a canary to warn us if we introduce a bug that // causes the two to diverge (see CASSANDRA-2946) - while (!gossiper.getEndpointStateForEndpoint(endpoint).getApplicationState(ApplicationState.SCHEMA).value.equals( + while (gossiper.getEndpointStateForEndpoint(endpoint) != null !gossiper.getEndpointStateForEndpoint(endpoint).getApplicationState(ApplicationState.SCHEMA).value.equals( gossiper.getEndpointStateForEndpoint(FBUtilities.getBroadcastAddress()).getApplicationState(ApplicationState.SCHEMA).value)) { -try -{ -Thread.sleep(1000); -} -catch (InterruptedException e) -{ -throw new AssertionError(e); -} +Uninterruptibles.sleepUninterruptibly(1, TimeUnit.SECONDS); waited += 1000; if (waited 2 * StorageService.RING_DELAY) throw new TimeoutException(Could not reach schema agreement with + endpoint + in + 2 * StorageService.RING_DELAY + ms); http://git-wip-us.apache.org/repos/asf/cassandra/blob/8a069587/src/java/org/apache/cassandra/service/MigrationManager.java --
[4/9] git commit: Fix harmless NPE in MM/HHOM. Patch by brandonwilliams, reviewed by Tyler Hobbs for CASSANDRA-5185
Fix harmless NPE in MM/HHOM Patch by brandonwilliams, reviewed by Tyler Hobbs for CASSANDRA-5185 Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/59bf44dd Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/59bf44dd Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/59bf44dd Branch: refs/heads/cassandra-1.2 Commit: 59bf44dd94939cea513a74250f6002357d1f9f2c Parents: 12413ad Author: Brandon Williams brandonwilli...@apache.org Authored: Tue Oct 22 15:28:47 2013 -0500 Committer: Brandon Williams brandonwilli...@apache.org Committed: Tue Oct 22 15:28:47 2013 -0500 -- src/java/org/apache/cassandra/db/HintedHandOffManager.java | 8 ++-- src/java/org/apache/cassandra/service/MigrationManager.java | 5 - 2 files changed, 10 insertions(+), 3 deletions(-) -- http://git-wip-us.apache.org/repos/asf/cassandra/blob/59bf44dd/src/java/org/apache/cassandra/db/HintedHandOffManager.java -- diff --git a/src/java/org/apache/cassandra/db/HintedHandOffManager.java b/src/java/org/apache/cassandra/db/HintedHandOffManager.java index c59..ede49e4 100644 --- a/src/java/org/apache/cassandra/db/HintedHandOffManager.java +++ b/src/java/org/apache/cassandra/db/HintedHandOffManager.java @@ -209,7 +209,7 @@ public class HintedHandOffManager implements HintedHandOffManagerMBean Gossiper gossiper = Gossiper.instance; int waited = 0; // first, wait for schema to be gossiped. 
-while (gossiper.getEndpointStateForEndpoint(endpoint).getApplicationState(ApplicationState.SCHEMA) == null) +while (gossiper.getEndpointStateForEndpoint(endpoint) != null gossiper.getEndpointStateForEndpoint(endpoint).getApplicationState(ApplicationState.SCHEMA) == null) { try { @@ -223,12 +223,14 @@ public class HintedHandOffManager implements HintedHandOffManagerMBean if (waited 2 * StorageService.RING_DELAY) throw new TimeoutException(Didin't receive gossiped schema from + endpoint + in + 2 * StorageService.RING_DELAY + ms); } +if (gossiper.getEndpointStateForEndpoint(endpoint) == null) +throw new TimeoutException(Node + endpoint + vanished while waiting for agreement); waited = 0; // then wait for the correct schema version. // usually we use DD.getDefsVersion, which checks the local schema uuid as stored in the system table. // here we check the one in gossip instead; this serves as a canary to warn us if we introduce a bug that // causes the two to diverge (see CASSANDRA-2946) -while (!gossiper.getEndpointStateForEndpoint(endpoint).getApplicationState(ApplicationState.SCHEMA).value.equals( +while (gossiper.getEndpointStateForEndpoint(endpoint) != null !gossiper.getEndpointStateForEndpoint(endpoint).getApplicationState(ApplicationState.SCHEMA).value.equals( gossiper.getEndpointStateForEndpoint(FBUtilities.getBroadcastAddress()).getApplicationState(ApplicationState.SCHEMA).value)) { try @@ -243,6 +245,8 @@ public class HintedHandOffManager implements HintedHandOffManagerMBean if (waited 2 * StorageService.RING_DELAY) throw new TimeoutException(Could not reach schema agreement with + endpoint + in + 2 * StorageService.RING_DELAY + ms); } +if (gossiper.getEndpointStateForEndpoint(endpoint) == null) +throw new TimeoutException(Node + endpoint + vanished while waiting for agreement); logger.debug(schema for {} matches local schema, endpoint); return waited; } 
http://git-wip-us.apache.org/repos/asf/cassandra/blob/59bf44dd/src/java/org/apache/cassandra/service/MigrationManager.java -- diff --git a/src/java/org/apache/cassandra/service/MigrationManager.java b/src/java/org/apache/cassandra/service/MigrationManager.java index e901b61..3ede35e 100644 --- a/src/java/org/apache/cassandra/service/MigrationManager.java +++ b/src/java/org/apache/cassandra/service/MigrationManager.java @@ -127,7 +127,10 @@ public class MigrationManager implements IEndpointStateChangeSubscriber public void run() { // grab the latest version of the schema since it may have changed again since the initial scheduling -VersionedValue value = Gossiper.instance.getEndpointStateForEndpoint(endpoint).getApplicationState(ApplicationState.SCHEMA); +EndpointState epState = Gossiper.instance.getEndpointStateForEndpoint(endpoint); +if (epState == null) +
[5/9] git commit: Fix harmless NPE in MM/HHOM. Patch by brandonwilliams, reviewed by Tyler Hobbs for CASSANDRA-5185
Fix harmless NPE in MM/HHOM Patch by brandonwilliams, reviewed by Tyler Hobbs for CASSANDRA-5185 Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/59bf44dd Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/59bf44dd Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/59bf44dd Branch: refs/heads/trunk Commit: 59bf44dd94939cea513a74250f6002357d1f9f2c Parents: 12413ad Author: Brandon Williams brandonwilli...@apache.org Authored: Tue Oct 22 15:28:47 2013 -0500 Committer: Brandon Williams brandonwilli...@apache.org Committed: Tue Oct 22 15:28:47 2013 -0500 -- src/java/org/apache/cassandra/db/HintedHandOffManager.java | 8 ++-- src/java/org/apache/cassandra/service/MigrationManager.java | 5 - 2 files changed, 10 insertions(+), 3 deletions(-) -- http://git-wip-us.apache.org/repos/asf/cassandra/blob/59bf44dd/src/java/org/apache/cassandra/db/HintedHandOffManager.java -- diff --git a/src/java/org/apache/cassandra/db/HintedHandOffManager.java b/src/java/org/apache/cassandra/db/HintedHandOffManager.java index c59..ede49e4 100644 --- a/src/java/org/apache/cassandra/db/HintedHandOffManager.java +++ b/src/java/org/apache/cassandra/db/HintedHandOffManager.java @@ -209,7 +209,7 @@ public class HintedHandOffManager implements HintedHandOffManagerMBean Gossiper gossiper = Gossiper.instance; int waited = 0; // first, wait for schema to be gossiped. 
-while (gossiper.getEndpointStateForEndpoint(endpoint).getApplicationState(ApplicationState.SCHEMA) == null) +while (gossiper.getEndpointStateForEndpoint(endpoint) != null gossiper.getEndpointStateForEndpoint(endpoint).getApplicationState(ApplicationState.SCHEMA) == null) { try { @@ -223,12 +223,14 @@ public class HintedHandOffManager implements HintedHandOffManagerMBean if (waited 2 * StorageService.RING_DELAY) throw new TimeoutException(Didin't receive gossiped schema from + endpoint + in + 2 * StorageService.RING_DELAY + ms); } +if (gossiper.getEndpointStateForEndpoint(endpoint) == null) +throw new TimeoutException(Node + endpoint + vanished while waiting for agreement); waited = 0; // then wait for the correct schema version. // usually we use DD.getDefsVersion, which checks the local schema uuid as stored in the system table. // here we check the one in gossip instead; this serves as a canary to warn us if we introduce a bug that // causes the two to diverge (see CASSANDRA-2946) -while (!gossiper.getEndpointStateForEndpoint(endpoint).getApplicationState(ApplicationState.SCHEMA).value.equals( +while (gossiper.getEndpointStateForEndpoint(endpoint) != null !gossiper.getEndpointStateForEndpoint(endpoint).getApplicationState(ApplicationState.SCHEMA).value.equals( gossiper.getEndpointStateForEndpoint(FBUtilities.getBroadcastAddress()).getApplicationState(ApplicationState.SCHEMA).value)) { try @@ -243,6 +245,8 @@ public class HintedHandOffManager implements HintedHandOffManagerMBean if (waited 2 * StorageService.RING_DELAY) throw new TimeoutException(Could not reach schema agreement with + endpoint + in + 2 * StorageService.RING_DELAY + ms); } +if (gossiper.getEndpointStateForEndpoint(endpoint) == null) +throw new TimeoutException(Node + endpoint + vanished while waiting for agreement); logger.debug(schema for {} matches local schema, endpoint); return waited; } 
http://git-wip-us.apache.org/repos/asf/cassandra/blob/59bf44dd/src/java/org/apache/cassandra/service/MigrationManager.java -- diff --git a/src/java/org/apache/cassandra/service/MigrationManager.java b/src/java/org/apache/cassandra/service/MigrationManager.java index e901b61..3ede35e 100644 --- a/src/java/org/apache/cassandra/service/MigrationManager.java +++ b/src/java/org/apache/cassandra/service/MigrationManager.java @@ -127,7 +127,10 @@ public class MigrationManager implements IEndpointStateChangeSubscriber public void run() { // grab the latest version of the schema since it may have changed again since the initial scheduling -VersionedValue value = Gossiper.instance.getEndpointStateForEndpoint(endpoint).getApplicationState(ApplicationState.SCHEMA); +EndpointState epState = Gossiper.instance.getEndpointStateForEndpoint(endpoint); +if (epState == null) +
[6/9] git commit: Fix harmless NPE in MM/HHOM. Patch by brandonwilliams, reviewed by Tyler Hobbs for CASSANDRA-5185
Fix harmless NPE in MM/HHOM Patch by brandonwilliams, reviewed by Tyler Hobbs for CASSANDRA-5185 Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/59bf44dd Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/59bf44dd Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/59bf44dd Branch: refs/heads/cassandra-2.0 Commit: 59bf44dd94939cea513a74250f6002357d1f9f2c Parents: 12413ad Author: Brandon Williams brandonwilli...@apache.org Authored: Tue Oct 22 15:28:47 2013 -0500 Committer: Brandon Williams brandonwilli...@apache.org Committed: Tue Oct 22 15:28:47 2013 -0500 -- src/java/org/apache/cassandra/db/HintedHandOffManager.java | 8 ++-- src/java/org/apache/cassandra/service/MigrationManager.java | 5 - 2 files changed, 10 insertions(+), 3 deletions(-) -- http://git-wip-us.apache.org/repos/asf/cassandra/blob/59bf44dd/src/java/org/apache/cassandra/db/HintedHandOffManager.java -- diff --git a/src/java/org/apache/cassandra/db/HintedHandOffManager.java b/src/java/org/apache/cassandra/db/HintedHandOffManager.java index c59..ede49e4 100644 --- a/src/java/org/apache/cassandra/db/HintedHandOffManager.java +++ b/src/java/org/apache/cassandra/db/HintedHandOffManager.java @@ -209,7 +209,7 @@ public class HintedHandOffManager implements HintedHandOffManagerMBean Gossiper gossiper = Gossiper.instance; int waited = 0; // first, wait for schema to be gossiped. 
-while (gossiper.getEndpointStateForEndpoint(endpoint).getApplicationState(ApplicationState.SCHEMA) == null) +while (gossiper.getEndpointStateForEndpoint(endpoint) != null gossiper.getEndpointStateForEndpoint(endpoint).getApplicationState(ApplicationState.SCHEMA) == null) { try { @@ -223,12 +223,14 @@ public class HintedHandOffManager implements HintedHandOffManagerMBean if (waited 2 * StorageService.RING_DELAY) throw new TimeoutException(Didin't receive gossiped schema from + endpoint + in + 2 * StorageService.RING_DELAY + ms); } +if (gossiper.getEndpointStateForEndpoint(endpoint) == null) +throw new TimeoutException(Node + endpoint + vanished while waiting for agreement); waited = 0; // then wait for the correct schema version. // usually we use DD.getDefsVersion, which checks the local schema uuid as stored in the system table. // here we check the one in gossip instead; this serves as a canary to warn us if we introduce a bug that // causes the two to diverge (see CASSANDRA-2946) -while (!gossiper.getEndpointStateForEndpoint(endpoint).getApplicationState(ApplicationState.SCHEMA).value.equals( +while (gossiper.getEndpointStateForEndpoint(endpoint) != null !gossiper.getEndpointStateForEndpoint(endpoint).getApplicationState(ApplicationState.SCHEMA).value.equals( gossiper.getEndpointStateForEndpoint(FBUtilities.getBroadcastAddress()).getApplicationState(ApplicationState.SCHEMA).value)) { try @@ -243,6 +245,8 @@ public class HintedHandOffManager implements HintedHandOffManagerMBean if (waited 2 * StorageService.RING_DELAY) throw new TimeoutException(Could not reach schema agreement with + endpoint + in + 2 * StorageService.RING_DELAY + ms); } +if (gossiper.getEndpointStateForEndpoint(endpoint) == null) +throw new TimeoutException(Node + endpoint + vanished while waiting for agreement); logger.debug(schema for {} matches local schema, endpoint); return waited; } 
http://git-wip-us.apache.org/repos/asf/cassandra/blob/59bf44dd/src/java/org/apache/cassandra/service/MigrationManager.java -- diff --git a/src/java/org/apache/cassandra/service/MigrationManager.java b/src/java/org/apache/cassandra/service/MigrationManager.java index e901b61..3ede35e 100644 --- a/src/java/org/apache/cassandra/service/MigrationManager.java +++ b/src/java/org/apache/cassandra/service/MigrationManager.java @@ -127,7 +127,10 @@ public class MigrationManager implements IEndpointStateChangeSubscriber public void run() { // grab the latest version of the schema since it may have changed again since the initial scheduling -VersionedValue value = Gossiper.instance.getEndpointStateForEndpoint(endpoint).getApplicationState(ApplicationState.SCHEMA); +EndpointState epState = Gossiper.instance.getEndpointStateForEndpoint(endpoint); +if (epState == null) +
[8/9] git commit: Merge branch 'cassandra-1.2' into cassandra-2.0
Merge branch 'cassandra-1.2' into cassandra-2.0 Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/8a069587 Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/8a069587 Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/8a069587 Branch: refs/heads/cassandra-2.0 Commit: 8a069587d5f67d64d99e9d391e5f6a733616ab97 Parents: b6147c1 59bf44d Author: Brandon Williams brandonwilli...@apache.org Authored: Tue Oct 22 15:29:24 2013 -0500 Committer: Brandon Williams brandonwilli...@apache.org Committed: Tue Oct 22 15:29:24 2013 -0500 -- src/java/org/apache/cassandra/db/HintedHandOffManager.java | 8 ++-- src/java/org/apache/cassandra/service/MigrationManager.java | 5 - 2 files changed, 10 insertions(+), 3 deletions(-) -- http://git-wip-us.apache.org/repos/asf/cassandra/blob/8a069587/src/java/org/apache/cassandra/db/HintedHandOffManager.java -- diff --cc src/java/org/apache/cassandra/db/HintedHandOffManager.java index 67a5c68,ede49e4..f1e751f --- a/src/java/org/apache/cassandra/db/HintedHandOffManager.java +++ b/src/java/org/apache/cassandra/db/HintedHandOffManager.java @@@ -249,22 -209,38 +249,24 @@@ public class HintedHandOffManager imple Gossiper gossiper = Gossiper.instance; int waited = 0; // first, wait for schema to be gossiped. 
- while (gossiper.getEndpointStateForEndpoint(endpoint).getApplicationState(ApplicationState.SCHEMA) == null) + while (gossiper.getEndpointStateForEndpoint(endpoint) != null gossiper.getEndpointStateForEndpoint(endpoint).getApplicationState(ApplicationState.SCHEMA) == null) { -try -{ -Thread.sleep(1000); -} -catch (InterruptedException e) -{ -throw new AssertionError(e); -} +Uninterruptibles.sleepUninterruptibly(1, TimeUnit.SECONDS); waited += 1000; if (waited 2 * StorageService.RING_DELAY) throw new TimeoutException(Didin't receive gossiped schema from + endpoint + in + 2 * StorageService.RING_DELAY + ms); } + if (gossiper.getEndpointStateForEndpoint(endpoint) == null) + throw new TimeoutException(Node + endpoint + vanished while waiting for agreement); waited = 0; // then wait for the correct schema version. -// usually we use DD.getDefsVersion, which checks the local schema uuid as stored in the system table. +// usually we use DD.getDefsVersion, which checks the local schema uuid as stored in the system keyspace. 
// here we check the one in gossip instead; this serves as a canary to warn us if we introduce a bug that // causes the two to diverge (see CASSANDRA-2946) - while (!gossiper.getEndpointStateForEndpoint(endpoint).getApplicationState(ApplicationState.SCHEMA).value.equals( + while (gossiper.getEndpointStateForEndpoint(endpoint) != null !gossiper.getEndpointStateForEndpoint(endpoint).getApplicationState(ApplicationState.SCHEMA).value.equals( gossiper.getEndpointStateForEndpoint(FBUtilities.getBroadcastAddress()).getApplicationState(ApplicationState.SCHEMA).value)) { -try -{ -Thread.sleep(1000); -} -catch (InterruptedException e) -{ -throw new AssertionError(e); -} +Uninterruptibles.sleepUninterruptibly(1, TimeUnit.SECONDS); waited += 1000; if (waited 2 * StorageService.RING_DELAY) throw new TimeoutException(Could not reach schema agreement with + endpoint + in + 2 * StorageService.RING_DELAY + ms); http://git-wip-us.apache.org/repos/asf/cassandra/blob/8a069587/src/java/org/apache/cassandra/service/MigrationManager.java --
[9/9] git commit: Merge branch 'cassandra-2.0' into trunk
Merge branch 'cassandra-2.0' into trunk Conflicts: build.xml Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/4d82ac9d Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/4d82ac9d Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/4d82ac9d Branch: refs/heads/trunk Commit: 4d82ac9d29a2f329fa7236704b06464430d53f76 Parents: 0bff97a 8a06958 Author: Brandon Williams brandonwilli...@apache.org Authored: Tue Oct 22 15:30:07 2013 -0500 Committer: Brandon Williams brandonwilli...@apache.org Committed: Tue Oct 22 15:30:07 2013 -0500 -- .rat-excludes | 1 + CHANGES.txt | 1 + NEWS.txt| 2 -- debian/changelog| 6 ++ examples/triggers/build.xml | 19 +++ examples/triggers/conf/InvertedIndex.properties | 19 ++- .../org/apache/cassandra/config/CFMetaData.java | 2 +- .../cassandra/db/HintedHandOffManager.java | 8 ++-- .../filter/TombstoneOverwhelmingException.java | 18 ++ .../cassandra/io/sstable/SSTableReader.java | 10 -- .../cassandra/metrics/RestorableMeter.java | 20 +++- .../apache/cassandra/net/WriteCallbackInfo.java | 18 ++ .../cassandra/service/MigrationManager.java | 5 - .../cassandra/pig/CqlTableDataTypeTest.java | 18 ++ .../pig/ThriftColumnFamilyDataTypeTest.java | 18 ++ 15 files changed, 151 insertions(+), 14 deletions(-) -- http://git-wip-us.apache.org/repos/asf/cassandra/blob/4d82ac9d/CHANGES.txt -- http://git-wip-us.apache.org/repos/asf/cassandra/blob/4d82ac9d/NEWS.txt -- http://git-wip-us.apache.org/repos/asf/cassandra/blob/4d82ac9d/src/java/org/apache/cassandra/config/CFMetaData.java -- http://git-wip-us.apache.org/repos/asf/cassandra/blob/4d82ac9d/src/java/org/apache/cassandra/db/HintedHandOffManager.java -- http://git-wip-us.apache.org/repos/asf/cassandra/blob/4d82ac9d/src/java/org/apache/cassandra/io/sstable/SSTableReader.java -- http://git-wip-us.apache.org/repos/asf/cassandra/blob/4d82ac9d/src/java/org/apache/cassandra/service/MigrationManager.java --
[1/9] git commit: Revert use of speculative retries by default for now
Updated Branches: refs/heads/cassandra-1.2 12413ad1f - 59bf44dd9 refs/heads/cassandra-2.0 b6147c1c7 - 8a069587d refs/heads/trunk 0bff97a2c - 4d82ac9d2 Revert use of speculative retries by default for now Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/66fe5bda Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/66fe5bda Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/66fe5bda Branch: refs/heads/trunk Commit: 66fe5bdaaa1f5e22d33528c3d2313cc23e7621d8 Parents: dfb9765 Author: Sylvain Lebresne sylv...@datastax.com Authored: Tue Oct 22 16:08:53 2013 +0200 Committer: Sylvain Lebresne sylv...@datastax.com Committed: Tue Oct 22 16:08:53 2013 +0200 -- NEWS.txt | 2 -- src/java/org/apache/cassandra/config/CFMetaData.java | 2 +- 2 files changed, 1 insertion(+), 3 deletions(-) -- http://git-wip-us.apache.org/repos/asf/cassandra/blob/66fe5bda/NEWS.txt -- diff --git a/NEWS.txt b/NEWS.txt index 69ab4fd..e39d5cd 100644 --- a/NEWS.txt +++ b/NEWS.txt @@ -19,8 +19,6 @@ using the provided 'sstableupgrade' tool. New features -- Speculative retry defaults to 99th percentile - (See blog post at http://www.datastax.com/dev/blog/rapid-read-protection-in-cassandra-2-0-2) - Configurable metrics reporting (see conf/metrics-reporter-config-sample.yaml) - Compaction history and stats are now saved to system keyspace http://git-wip-us.apache.org/repos/asf/cassandra/blob/66fe5bda/src/java/org/apache/cassandra/config/CFMetaData.java -- diff --git a/src/java/org/apache/cassandra/config/CFMetaData.java b/src/java/org/apache/cassandra/config/CFMetaData.java index d63ee01..479dafc 100644 --- a/src/java/org/apache/cassandra/config/CFMetaData.java +++ b/src/java/org/apache/cassandra/config/CFMetaData.java @@ -83,7 +83,7 @@ public final class CFMetaData public final static Class? 
extends AbstractCompactionStrategy DEFAULT_COMPACTION_STRATEGY_CLASS = SizeTieredCompactionStrategy.class; public final static Caching DEFAULT_CACHING_STRATEGY = Caching.KEYS_ONLY; public final static int DEFAULT_DEFAULT_TIME_TO_LIVE = 0; -public final static SpeculativeRetry DEFAULT_SPECULATIVE_RETRY = new SpeculativeRetry(SpeculativeRetry.RetryType.PERCENTILE, 0.99); +public final static SpeculativeRetry DEFAULT_SPECULATIVE_RETRY = new SpeculativeRetry(SpeculativeRetry.RetryType.NONE, 0); public final static int DEFAULT_INDEX_INTERVAL = 128; public final static boolean DEFAULT_POPULATE_IO_CACHE_ON_FLUSH = false;
[jira] [Commented] (CASSANDRA-6194) speculative retry can sometimes violate consistency
[ https://issues.apache.org/jira/browse/CASSANDRA-6194?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanelfocusedCommentId=13802234#comment-13802234 ] Brandon Williams commented on CASSANDRA-6194: - This fixes the test for me. speculative retry can sometimes violate consistency --- Key: CASSANDRA-6194 URL: https://issues.apache.org/jira/browse/CASSANDRA-6194 Project: Cassandra Issue Type: Bug Components: Core Reporter: Brandon Williams Assignee: Jonathan Ellis Fix For: 2.0.2 Attachments: 6194.txt This is most evident with intermittent failures of the short_read dtests. I'll focus on short_read_reversed_test for explanation, since that's what I used to bisect. This test inserts some columns into a row, then deletes a subset, but it performs each delete on a different node, with another node down (hints are disabled.) Finally it reads the row back at QUORUM and checks that it doesn't see any deleted columns, however with speculative retry on this often fails. I bisected this to the change that made 99th percentile SR the default reliably by looping the test enough times at each iteration to be sure it was passing or failing. -- This message was sent by Atlassian JIRA (v6.1#6144)
[jira] [Commented] (CASSANDRA-6220) Unable to select multiple entries using In clause on clustering part of compound key
[ https://issues.apache.org/jira/browse/CASSANDRA-6220?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanelfocusedCommentId=13802237#comment-13802237 ] Constance Eustace commented on CASSANDRA-6220: -- If I do this sequence: DROP SCHEMA CREATE SCHEMA CREATE INITIAL DATA (i.e. no updates to existing data) NODETOOL COMPACT -- magic sauce MASSIVE INSERT + SIMULTANEOUS UPDATES to INITIAL DATA does not reproduce. The nodetool compact after the schema creation seems to reset/stabilize the database. I used to replicate very reliably after about 300,000 inserts / 2000 updates. Now I do 1.75million inserts with 20,000 updates and no reproduction. Unable to select multiple entries using In clause on clustering part of compound key Key: CASSANDRA-6220 URL: https://issues.apache.org/jira/browse/CASSANDRA-6220 Project: Cassandra Issue Type: Bug Components: Core Reporter: Ashot Golovenko Attachments: inserts.zip I have the following table: CREATE TABLE rating ( id bigint, mid int, hid int, r double, PRIMARY KEY ((id, mid), hid)); And I get really really strange result sets on the following queries: cqlsh:bm SELECT hid, r FROM rating WHERE id = 755349113 and mid = 201310 and hid = 201329320; hid | r ---+ 201329320 | 45.476 (1 rows) cqlsh:bm SELECT hid, r FROM rating WHERE id = 755349113 and mid = 201310 and hid = 201329220; hid | r ---+--- 201329220 | 53.62 (1 rows) cqlsh:bm SELECT hid, r FROM rating WHERE id = 755349113 and mid = 201310 and hid in (201329320, 201329220); hid | r ---+ 201329320 | 45.476 (1 rows) -- WRONG - should be two records As you can see although both records exist I'm not able the fetch all of them using in clause. By now I have to cycle my requests which are about 30 and I find it highly inefficient given that I query physically the same row. More of that - it doesn't happen all the time! For different id values sometimes I get the correct dataset. 
Ideally I'd like the following select to work: SELECT hid, r FROM rating WHERE id = 755349113 and mid in ? and hid in ?; Which doesn't work either. -- This message was sent by Atlassian JIRA (v6.1#6144)
[jira] [Comment Edited] (CASSANDRA-6220) Unable to select multiple entries using In clause on clustering part of compound key
[ https://issues.apache.org/jira/browse/CASSANDRA-6220?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanelfocusedCommentId=13802237#comment-13802237 ] Constance Eustace edited comment on CASSANDRA-6220 at 10/22/13 8:41 PM: If I do this sequence: DROP SCHEMA CREATE SCHEMA CREATE INITIAL DATA (i.e. no updates to existing data) NODETOOL COMPACT -- magic sauce MASSIVE INSERT + SIMULTANEOUS UPDATES to INITIAL DATA does not reproduce. The nodetool compact after the schema creation seems to reset/stabilize the database. I used to replicate very reliably after about 300,000 inserts / 2000 updates. Now I do 1.75million inserts with 20,000 updates and no reproduction. Obviously you could probably run the nodetool compact after the SCHEMA creation, and then do initial data creation/update+insert run was (Author: cowardlydragon): If I do this sequence: DROP SCHEMA CREATE SCHEMA CREATE INITIAL DATA (i.e. no updates to existing data) NODETOOL COMPACT -- magic sauce MASSIVE INSERT + SIMULTANEOUS UPDATES to INITIAL DATA does not reproduce. The nodetool compact after the schema creation seems to reset/stabilize the database. I used to replicate very reliably after about 300,000 inserts / 2000 updates. Now I do 1.75million inserts with 20,000 updates and no reproduction. 
Unable to select multiple entries using In clause on clustering part of compound key Key: CASSANDRA-6220 URL: https://issues.apache.org/jira/browse/CASSANDRA-6220 Project: Cassandra Issue Type: Bug Components: Core Reporter: Ashot Golovenko Attachments: inserts.zip I have the following table: CREATE TABLE rating ( id bigint, mid int, hid int, r double, PRIMARY KEY ((id, mid), hid)); And I get really really strange result sets on the following queries: cqlsh:bm SELECT hid, r FROM rating WHERE id = 755349113 and mid = 201310 and hid = 201329320; hid | r ---+ 201329320 | 45.476 (1 rows) cqlsh:bm SELECT hid, r FROM rating WHERE id = 755349113 and mid = 201310 and hid = 201329220; hid | r ---+--- 201329220 | 53.62 (1 rows) cqlsh:bm SELECT hid, r FROM rating WHERE id = 755349113 and mid = 201310 and hid in (201329320, 201329220); hid | r ---+ 201329320 | 45.476 (1 rows) -- WRONG - should be two records As you can see although both records exist I'm not able the fetch all of them using in clause. By now I have to cycle my requests which are about 30 and I find it highly inefficient given that I query physically the same row. More of that - it doesn't happen all the time! For different id values sometimes I get the correct dataset. Ideally I'd like the following select to work: SELECT hid, r FROM rating WHERE id = 755349113 and mid in ? and hid in ?; Which doesn't work either. -- This message was sent by Atlassian JIRA (v6.1#6144)
[jira] [Commented] (CASSANDRA-6220) Unable to select multiple entries using In clause on clustering part of compound key
[ https://issues.apache.org/jira/browse/CASSANDRA-6220?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanelfocusedCommentId=13802269#comment-13802269 ] Constance Eustace commented on CASSANDRA-6220: -- It may also require invalidatekeycache / caches, possibly with a flush in there as well... Unable to select multiple entries using In clause on clustering part of compound key Key: CASSANDRA-6220 URL: https://issues.apache.org/jira/browse/CASSANDRA-6220 Project: Cassandra Issue Type: Bug Components: Core Reporter: Ashot Golovenko Attachments: inserts.zip I have the following table: CREATE TABLE rating ( id bigint, mid int, hid int, r double, PRIMARY KEY ((id, mid), hid)); And I get really really strange result sets on the following queries: cqlsh:bm SELECT hid, r FROM rating WHERE id = 755349113 and mid = 201310 and hid = 201329320; hid | r ---+ 201329320 | 45.476 (1 rows) cqlsh:bm SELECT hid, r FROM rating WHERE id = 755349113 and mid = 201310 and hid = 201329220; hid | r ---+--- 201329220 | 53.62 (1 rows) cqlsh:bm SELECT hid, r FROM rating WHERE id = 755349113 and mid = 201310 and hid in (201329320, 201329220); hid | r ---+ 201329320 | 45.476 (1 rows) -- WRONG - should be two records As you can see although both records exist I'm not able the fetch all of them using in clause. By now I have to cycle my requests which are about 30 and I find it highly inefficient given that I query physically the same row. More of that - it doesn't happen all the time! For different id values sometimes I get the correct dataset. Ideally I'd like the following select to work: SELECT hid, r FROM rating WHERE id = 755349113 and mid in ? and hid in ?; Which doesn't work either. -- This message was sent by Atlassian JIRA (v6.1#6144)
[jira] [Comment Edited] (CASSANDRA-6220) Unable to select multiple entries using In clause on clustering part of compound key
[ https://issues.apache.org/jira/browse/CASSANDRA-6220?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanelfocusedCommentId=13802269#comment-13802269 ] Constance Eustace edited comment on CASSANDRA-6220 at 10/22/13 9:11 PM: I was able to reproduce the original way of reproduction (drop schema, create schema, INSERT / UPDATE with no nodetool compact in there). Post-repair of the corruption seemed to require nodetool compact, invalidatekeycache, and/or possibly flush. Now that I've repaired. I'm going to run a 3.5 million insert + simulataneous update run to see if the nodetool compact repair makes the data more durable. was (Author: cowardlydragon): It may also require invalidatekeycache / caches, possibly with a flush in there as well... Unable to select multiple entries using In clause on clustering part of compound key Key: CASSANDRA-6220 URL: https://issues.apache.org/jira/browse/CASSANDRA-6220 Project: Cassandra Issue Type: Bug Components: Core Reporter: Ashot Golovenko Attachments: inserts.zip I have the following table: CREATE TABLE rating ( id bigint, mid int, hid int, r double, PRIMARY KEY ((id, mid), hid)); And I get really really strange result sets on the following queries: cqlsh:bm SELECT hid, r FROM rating WHERE id = 755349113 and mid = 201310 and hid = 201329320; hid | r ---+ 201329320 | 45.476 (1 rows) cqlsh:bm SELECT hid, r FROM rating WHERE id = 755349113 and mid = 201310 and hid = 201329220; hid | r ---+--- 201329220 | 53.62 (1 rows) cqlsh:bm SELECT hid, r FROM rating WHERE id = 755349113 and mid = 201310 and hid in (201329320, 201329220); hid | r ---+ 201329320 | 45.476 (1 rows) -- WRONG - should be two records As you can see although both records exist I'm not able the fetch all of them using in clause. By now I have to cycle my requests which are about 30 and I find it highly inefficient given that I query physically the same row. More of that - it doesn't happen all the time! 
For different id values sometimes I get the correct dataset. Ideally I'd like the following select to work: SELECT hid, r FROM rating WHERE id = 755349113 and mid in ? and hid in ?; Which doesn't work either. -- This message was sent by Atlassian JIRA (v6.1#6144)
[jira] [Comment Edited] (CASSANDRA-6220) Unable to select multiple entries using In clause on clustering part of compound key
[ https://issues.apache.org/jira/browse/CASSANDRA-6220?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanelfocusedCommentId=13802269#comment-13802269 ] Constance Eustace edited comment on CASSANDRA-6220 at 10/22/13 9:12 PM: I was able to reproduce the original way of reproduction (drop schema, create schema, INSERT / UPDATE with no nodetool compact in there). Post-repair of the corruption seemed to require nodetool compact, invalidatekeycache, and/or possibly flush. Now that I've repaired. I'm going to run a 3.5 million insert + simultaneous update run to see if the nodetool compact repair makes the data more durable, as has been seen today before. was (Author: cowardlydragon): I was able to reproduce the original way of reproduction (drop schema, create schema, INSERT / UPDATE with no nodetool compact in there). Post-repair of the corruption seemed to require nodetool compact, invalidatekeycache, and/or possibly flush. Now that I've repaired. I'm going to run a 3.5 million insert + simultaneous update run to see if the nodetool compact repair makes the data more durable. 
Unable to select multiple entries using In clause on clustering part of compound key Key: CASSANDRA-6220 URL: https://issues.apache.org/jira/browse/CASSANDRA-6220 Project: Cassandra Issue Type: Bug Components: Core Reporter: Ashot Golovenko Attachments: inserts.zip I have the following table: CREATE TABLE rating ( id bigint, mid int, hid int, r double, PRIMARY KEY ((id, mid), hid)); And I get really really strange result sets on the following queries: cqlsh:bm SELECT hid, r FROM rating WHERE id = 755349113 and mid = 201310 and hid = 201329320; hid | r ---+ 201329320 | 45.476 (1 rows) cqlsh:bm SELECT hid, r FROM rating WHERE id = 755349113 and mid = 201310 and hid = 201329220; hid | r ---+--- 201329220 | 53.62 (1 rows) cqlsh:bm SELECT hid, r FROM rating WHERE id = 755349113 and mid = 201310 and hid in (201329320, 201329220); hid | r ---+ 201329320 | 45.476 (1 rows) -- WRONG - should be two records As you can see although both records exist I'm not able the fetch all of them using in clause. By now I have to cycle my requests which are about 30 and I find it highly inefficient given that I query physically the same row. More of that - it doesn't happen all the time! For different id values sometimes I get the correct dataset. Ideally I'd like the following select to work: SELECT hid, r FROM rating WHERE id = 755349113 and mid in ? and hid in ?; Which doesn't work either. -- This message was sent by Atlassian JIRA (v6.1#6144)
[jira] [Commented] (CASSANDRA-5936) Improve the way we pick L0 compaction candidates
[ https://issues.apache.org/jira/browse/CASSANDRA-5936?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanelfocusedCommentId=13802295#comment-13802295 ] T Jake Luciani commented on CASSANDRA-5936: --- [~mericsson] Have you looked at all at promoting any L0 sstable to a level with no overlaps? LevelDB does this and I think it would give LCS a lot more breathing room before it starts hitting the wall. It would also promote more cross level parallel compactions. Improve the way we pick L0 compaction candidates Key: CASSANDRA-5936 URL: https://issues.apache.org/jira/browse/CASSANDRA-5936 Project: Cassandra Issue Type: Improvement Components: Core Reporter: Marcus Eriksson Assignee: Marcus Eriksson Fix For: 2.1 We could improve the way we pick compaction candidates in level 0 in LCS. The most common way for us to get behind on compaction is after repairs, we should exploit the fact that the streamed sstables are most often very narrow in range since the other nodes in the ring will have a similar sstable-range-distribution. We should in theory be able to do 10 concurrent compactions involving L1 - ie, partition L0 in buckets defined by the sstables in L1 to only keep one L1 SSTable busy for every compaction (be it L1 to L2 or L0 to L1). we will need some heuristics on when to select candidates from the buckets and when to do it the old way (since L0 sstables can span several L1 sstables) -- This message was sent by Atlassian JIRA (v6.1#6144)
[2/3] git commit: Reject bootstrapping endpoints that are already in the ring with different gossip data Patch by Tyler Hobbs, reviewed by brandonwilliams for CASSANDRA-5571
Reject bootstrapping endpoints that are already in the ring with different gossip data Patch by Tyler Hobbs, reviewed by brandonwilliams for CASSANDRA-5571 Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/4155afd5 Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/4155afd5 Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/4155afd5 Branch: refs/heads/trunk Commit: 4155afd510828770c2d666a7e4cea28062964130 Parents: 8a06958 Author: Brandon Williams brandonwilli...@apache.org Authored: Tue Oct 22 16:44:35 2013 -0500 Committer: Brandon Williams brandonwilli...@apache.org Committed: Tue Oct 22 16:44:35 2013 -0500 -- src/java/org/apache/cassandra/gms/Gossiper.java | 2 +- .../cassandra/service/StorageService.java | 30 ++-- 2 files changed, 28 insertions(+), 4 deletions(-) -- http://git-wip-us.apache.org/repos/asf/cassandra/blob/4155afd5/src/java/org/apache/cassandra/gms/Gossiper.java -- diff --git a/src/java/org/apache/cassandra/gms/Gossiper.java b/src/java/org/apache/cassandra/gms/Gossiper.java index c97a54b..07c21bd 100644 --- a/src/java/org/apache/cassandra/gms/Gossiper.java +++ b/src/java/org/apache/cassandra/gms/Gossiper.java @@ -864,7 +864,7 @@ public class Gossiper implements IFailureDetectionEventListener, GossiperMBean subscriber.onJoin(ep, epState); } -private boolean isDeadState(EndpointState epState) +public boolean isDeadState(EndpointState epState) { if (epState.getApplicationState(ApplicationState.STATUS) == null) return false; http://git-wip-us.apache.org/repos/asf/cassandra/blob/4155afd5/src/java/org/apache/cassandra/service/StorageService.java -- diff --git a/src/java/org/apache/cassandra/service/StorageService.java b/src/java/org/apache/cassandra/service/StorageService.java index f171192..f9126c7 100644 --- a/src/java/org/apache/cassandra/service/StorageService.java +++ b/src/java/org/apache/cassandra/service/StorageService.java @@ -408,6 +408,22 @@ public 
class StorageService extends NotificationBroadcasterSupport implements IE } } +public synchronized void checkForEndpointCollision() throws ConfigurationException +{ +logger.debug("Starting shadow gossip round to check for endpoint collision"); +MessagingService.instance().listen(FBUtilities.getLocalAddress()); +Gossiper.instance.doShadowRound(); +EndpointState epState = Gossiper.instance.getEndpointStateForEndpoint(FBUtilities.getBroadcastAddress()); +if (epState != null && !Gossiper.instance.isDeadState(epState)) +{ +throw new RuntimeException(String.format("A node with address %s already exists, cancelling join. " + + "Use cassandra.replace_address if you want to replace this node.", + FBUtilities.getBroadcastAddress())); +} +MessagingService.instance().shutdown(); +Gossiper.instance.resetEndpointStateMap(); +} + public synchronized void initClient() throws ConfigurationException { // We don't wait, because we're going to actually try to work on @@ -564,6 +580,11 @@ public class StorageService extends NotificationBroadcasterSupport implements IE } } +private boolean shouldBootstrap() +{ +return DatabaseDescriptor.isAutoBootstrap() && !SystemKeyspace.bootstrapComplete() && !DatabaseDescriptor.getSeeds().contains(FBUtilities.getBroadcastAddress()); +} + private void joinTokenRing(int delay) throws ConfigurationException { joined = true; @@ -581,6 +602,11 @@ public class StorageService extends NotificationBroadcasterSupport implements IE appStates.put(ApplicationState.STATUS, valueFactory.hibernate(true)); appStates.put(ApplicationState.TOKENS, valueFactory.tokens(tokens)); } +else if (shouldBootstrap()) +{ +checkForEndpointCollision(); +} + // have to start the gossip service before we can see any info on other nodes. this is necessary // for bootstrap to get the load info it needs. // (we won't be part of the storage ring though until we add a counterId to our state, below.) 
@@ -621,9 +647,7 @@ public class StorageService extends NotificationBroadcasterSupport implements IE SystemKeyspace.bootstrapInProgress(), SystemKeyspace.bootstrapComplete(), DatabaseDescriptor.getSeeds().contains(FBUtilities.getBroadcastAddress())); -if
[3/3] git commit: Merge branch 'cassandra-2.0' into trunk
Merge branch 'cassandra-2.0' into trunk Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/dba12f38 Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/dba12f38 Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/dba12f38 Branch: refs/heads/trunk Commit: dba12f386dd2b5637a978c7ed1e345bfa96f93de Parents: 4d82ac9 4155afd Author: Brandon Williams brandonwilli...@apache.org Authored: Tue Oct 22 16:45:32 2013 -0500 Committer: Brandon Williams brandonwilli...@apache.org Committed: Tue Oct 22 16:45:32 2013 -0500 -- src/java/org/apache/cassandra/gms/Gossiper.java | 2 +- .../cassandra/service/StorageService.java | 30 ++-- 2 files changed, 28 insertions(+), 4 deletions(-) -- http://git-wip-us.apache.org/repos/asf/cassandra/blob/dba12f38/src/java/org/apache/cassandra/gms/Gossiper.java -- http://git-wip-us.apache.org/repos/asf/cassandra/blob/dba12f38/src/java/org/apache/cassandra/service/StorageService.java --
[1/3] git commit: Reject bootstrapping endpoints that are already in the ring with different gossip data Patch by Tyler Hobbs, reviewed by brandonwilliams for CASSANDRA-5571
Updated Branches: refs/heads/cassandra-2.0 8a069587d - 4155afd51 refs/heads/trunk 4d82ac9d2 - dba12f386 Reject bootstrapping endpoints that are already in the ring with different gossip data Patch by Tyler Hobbs, reviewed by brandonwilliams for CASSANDRA-5571 Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/4155afd5 Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/4155afd5 Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/4155afd5 Branch: refs/heads/cassandra-2.0 Commit: 4155afd510828770c2d666a7e4cea28062964130 Parents: 8a06958 Author: Brandon Williams brandonwilli...@apache.org Authored: Tue Oct 22 16:44:35 2013 -0500 Committer: Brandon Williams brandonwilli...@apache.org Committed: Tue Oct 22 16:44:35 2013 -0500 -- src/java/org/apache/cassandra/gms/Gossiper.java | 2 +- .../cassandra/service/StorageService.java | 30 ++-- 2 files changed, 28 insertions(+), 4 deletions(-) -- http://git-wip-us.apache.org/repos/asf/cassandra/blob/4155afd5/src/java/org/apache/cassandra/gms/Gossiper.java -- diff --git a/src/java/org/apache/cassandra/gms/Gossiper.java b/src/java/org/apache/cassandra/gms/Gossiper.java index c97a54b..07c21bd 100644 --- a/src/java/org/apache/cassandra/gms/Gossiper.java +++ b/src/java/org/apache/cassandra/gms/Gossiper.java @@ -864,7 +864,7 @@ public class Gossiper implements IFailureDetectionEventListener, GossiperMBean subscriber.onJoin(ep, epState); } -private boolean isDeadState(EndpointState epState) +public boolean isDeadState(EndpointState epState) { if (epState.getApplicationState(ApplicationState.STATUS) == null) return false; http://git-wip-us.apache.org/repos/asf/cassandra/blob/4155afd5/src/java/org/apache/cassandra/service/StorageService.java -- diff --git a/src/java/org/apache/cassandra/service/StorageService.java b/src/java/org/apache/cassandra/service/StorageService.java index f171192..f9126c7 100644 --- 
a/src/java/org/apache/cassandra/service/StorageService.java +++ b/src/java/org/apache/cassandra/service/StorageService.java @@ -408,6 +408,22 @@ public class StorageService extends NotificationBroadcasterSupport implements IE } } +public synchronized void checkForEndpointCollision() throws ConfigurationException +{ +logger.debug("Starting shadow gossip round to check for endpoint collision"); +MessagingService.instance().listen(FBUtilities.getLocalAddress()); +Gossiper.instance.doShadowRound(); +EndpointState epState = Gossiper.instance.getEndpointStateForEndpoint(FBUtilities.getBroadcastAddress()); +if (epState != null && !Gossiper.instance.isDeadState(epState)) +{ +throw new RuntimeException(String.format("A node with address %s already exists, cancelling join. " + + "Use cassandra.replace_address if you want to replace this node.", + FBUtilities.getBroadcastAddress())); +} +MessagingService.instance().shutdown(); +Gossiper.instance.resetEndpointStateMap(); +} + public synchronized void initClient() throws ConfigurationException { // We don't wait, because we're going to actually try to work on @@ -564,6 +580,11 @@ public class StorageService extends NotificationBroadcasterSupport implements IE } } +private boolean shouldBootstrap() +{ +return DatabaseDescriptor.isAutoBootstrap() && !SystemKeyspace.bootstrapComplete() && !DatabaseDescriptor.getSeeds().contains(FBUtilities.getBroadcastAddress()); +} + private void joinTokenRing(int delay) throws ConfigurationException { joined = true; @@ -581,6 +602,11 @@ public class StorageService extends NotificationBroadcasterSupport implements IE appStates.put(ApplicationState.STATUS, valueFactory.hibernate(true)); appStates.put(ApplicationState.TOKENS, valueFactory.tokens(tokens)); } +else if (shouldBootstrap()) +{ +checkForEndpointCollision(); +} + // have to start the gossip service before we can see any info on other nodes. this is necessary // for bootstrap to get the load info it needs. 
// (we won't be part of the storage ring though until we add a counterId to our state, below.) @@ -621,9 +647,7 @@ public class StorageService extends NotificationBroadcasterSupport implements IE SystemKeyspace.bootstrapInProgress(), SystemKeyspace.bootstrapComplete(),
[1/3] git commit: update changes
Updated Branches: refs/heads/cassandra-2.0 4155afd51 - 2629e5e4e refs/heads/trunk dba12f386 - 6cb5da88a update changes Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/2629e5e4 Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/2629e5e4 Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/2629e5e4 Branch: refs/heads/cassandra-2.0 Commit: 2629e5e4ee0afbce6f12d54a1ee301501348d8fb Parents: 4155afd Author: Brandon Williams brandonwilli...@apache.org Authored: Tue Oct 22 16:49:03 2013 -0500 Committer: Brandon Williams brandonwilli...@apache.org Committed: Tue Oct 22 16:49:03 2013 -0500 -- CHANGES.txt | 4 1 file changed, 4 insertions(+) -- http://git-wip-us.apache.org/repos/asf/cassandra/blob/2629e5e4/CHANGES.txt -- diff --git a/CHANGES.txt b/CHANGES.txt index ac4c010..c01b4a8 100644 --- a/CHANGES.txt +++ b/CHANGES.txt @@ -1,3 +1,7 @@ +2.0.3 + * Reject bootstrapping if the node already exists in gossip (CASSANDRA-5571) + + 2.0.2 * Update FailureDetector to use nanontime (CASSANDRA-4925) * Fix FileCacheService regressions (CASSANDRA-6149)
[3/3] git commit: Merge branch 'cassandra-2.0' into trunk
Merge branch 'cassandra-2.0' into trunk Conflicts: CHANGES.txt Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/6cb5da88 Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/6cb5da88 Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/6cb5da88 Branch: refs/heads/trunk Commit: 6cb5da88a3703de57a118e063adde453b53a68f9 Parents: dba12f3 2629e5e Author: Brandon Williams brandonwilli...@apache.org Authored: Tue Oct 22 16:49:32 2013 -0500 Committer: Brandon Williams brandonwilli...@apache.org Committed: Tue Oct 22 16:49:32 2013 -0500 -- CHANGES.txt | 4 1 file changed, 4 insertions(+) -- http://git-wip-us.apache.org/repos/asf/cassandra/blob/6cb5da88/CHANGES.txt -- diff --cc CHANGES.txt index cf1a384,c01b4a8..407c317 --- a/CHANGES.txt +++ b/CHANGES.txt @@@ -1,12 -1,7 +1,16 @@@ +2.1 + * Parallelize fetching rows for low-cardinality indexes (CASSANDRA-1337) + * change logging from log4j to logback (CASSANDRA-5883) + * switch to LZ4 compression for internode communication (CASSANDRA-5887) + * Stop using Thrift-generated Index* classes internally (CASSANDRA-5971) + * Remove 1.2 network compatibility code (CASSANDRA-5960) + * Remove leveled json manifest migration code (CASSANDRA-5996) + + + 2.0.3 + * Reject bootstrapping if the node already exists in gossip (CASSANDRA-5571) + + 2.0.2 * Update FailureDetector to use nanontime (CASSANDRA-4925) * Fix FileCacheService regressions (CASSANDRA-6149)
[2/3] git commit: update changes
update changes Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/2629e5e4 Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/2629e5e4 Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/2629e5e4 Branch: refs/heads/trunk Commit: 2629e5e4ee0afbce6f12d54a1ee301501348d8fb Parents: 4155afd Author: Brandon Williams brandonwilli...@apache.org Authored: Tue Oct 22 16:49:03 2013 -0500 Committer: Brandon Williams brandonwilli...@apache.org Committed: Tue Oct 22 16:49:03 2013 -0500 -- CHANGES.txt | 4 1 file changed, 4 insertions(+) -- http://git-wip-us.apache.org/repos/asf/cassandra/blob/2629e5e4/CHANGES.txt -- diff --git a/CHANGES.txt b/CHANGES.txt index ac4c010..c01b4a8 100644 --- a/CHANGES.txt +++ b/CHANGES.txt @@ -1,3 +1,7 @@ +2.0.3 + * Reject bootstrapping if the node already exists in gossip (CASSANDRA-5571) + + 2.0.2 * Update FailureDetector to use nanontime (CASSANDRA-4925) * Fix FileCacheService regressions (CASSANDRA-6149)
[jira] [Commented] (CASSANDRA-5936) Improve the way we pick L0 compaction candidates
[ https://issues.apache.org/jira/browse/CASSANDRA-5936?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanelfocusedCommentId=13802328#comment-13802328 ] Jonathan Ellis commented on CASSANDRA-5936: --- Can you link the LevelDB source in question? I don't remember seeing that. Curious when it decides to do that vs merging w/ L1. Improve the way we pick L0 compaction candidates Key: CASSANDRA-5936 URL: https://issues.apache.org/jira/browse/CASSANDRA-5936 Project: Cassandra Issue Type: Improvement Components: Core Reporter: Marcus Eriksson Assignee: Marcus Eriksson Fix For: 2.1 We could improve the way we pick compaction candidates in level 0 in LCS. The most common way for us to get behind on compaction is after repairs, we should exploit the fact that the streamed sstables are most often very narrow in range since the other nodes in the ring will have a similar sstable-range-distribution. We should in theory be able to do 10 concurrent compactions involving L1 - ie, partition L0 in buckets defined by the sstables in L1 to only keep one L1 SSTable busy for every compaction (be it L1 to L2 or L0 to L1). we will need some heuristics on when to select candidates from the buckets and when to do it the old way (since L0 sstables can span several L1 sstables) -- This message was sent by Atlassian JIRA (v6.1#6144)
[6/6] git commit: Merge branch 'cassandra-2.0' into trunk
Merge branch 'cassandra-2.0' into trunk Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/aca45fe9 Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/aca45fe9 Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/aca45fe9 Branch: refs/heads/trunk Commit: aca45fe99503aba03dae813da9af3267f214f69b Parents: 6cb5da8 bee0b4b Author: Brandon Williams brandonwilli...@apache.org Authored: Tue Oct 22 16:58:45 2013 -0500 Committer: Brandon Williams brandonwilli...@apache.org Committed: Tue Oct 22 16:58:45 2013 -0500 -- CHANGES.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) -- http://git-wip-us.apache.org/repos/asf/cassandra/blob/aca45fe9/CHANGES.txt --
[5/6] git commit: Merge branch 'cassandra-1.2' into cassandra-2.0
Merge branch 'cassandra-1.2' into cassandra-2.0 Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/bee0b4b9 Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/bee0b4b9 Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/bee0b4b9 Branch: refs/heads/cassandra-2.0 Commit: bee0b4b9df0b633580ba5e22671423967f4f2244 Parents: 2629e5e df77881 Author: Brandon Williams brandonwilli...@apache.org Authored: Tue Oct 22 16:58:27 2013 -0500 Committer: Brandon Williams brandonwilli...@apache.org Committed: Tue Oct 22 16:58:27 2013 -0500 -- CHANGES.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) -- http://git-wip-us.apache.org/repos/asf/cassandra/blob/bee0b4b9/CHANGES.txt -- diff --cc CHANGES.txt index c01b4a8,b5f83b7..5abb16b --- a/CHANGES.txt +++ b/CHANGES.txt @@@ -1,41 -1,13 +1,41 @@@ -1.2.12 - * (Hadoop) Require CFRR batchSize to be at least 2 (CASSANDRA-6114) - * Fix altering column types (CASSANDRA-6185) - * cqlsh: fix CREATE/ALTER WITH completion (CASSANDRA-6196) +2.0.3 + * Reject bootstrapping if the node already exists in gossip (CASSANDRA-5571) -1.2.11 +2.0.2 + * Update FailureDetector to use nanontime (CASSANDRA-4925) + * Fix FileCacheService regressions (CASSANDRA-6149) + * Never return WriteTimeout for CL.ANY (CASSANDRA-6032) + * Fix race conditions in bulk loader (CASSANDRA-6129) + * Add configurable metrics reporting (CASSANDRA-4430) + * drop queries exceeding a configurable number of tombstones (CASSANDRA-6117) + * Track and persist sstable read activity (CASSANDRA-5515) + * Fixes for speculative retry (CASSANDRA-5932) + * Improve memory usage of metadata min/max column names (CASSANDRA-6077) + * Fix thrift validation refusing row markers on CQL3 tables (CASSANDRA-6081) + * Fix insertion of collections with CAS (CASSANDRA-6069) + * Correctly send metadata on SELECT COUNT (CASSANDRA-6080) + * Track clients' remote addresses in ClientState (CASSANDRA-6070) + * Create 
snapshot dir if it does not exist when migrating + leveled manifest (CASSANDRA-6093) + * make sequential nodetool repair the default (CASSANDRA-5950) + * Add more hooks for compaction strategy implementations (CASSANDRA-6111) + * Fix potential NPE on composite 2ndary indexes (CASSANDRA-6098) + * Delete can potentially be skipped in batch (CASSANDRA-6115) + * Allow alter keyspace on system_traces (CASSANDRA-6016) + * Disallow empty column names in cql (CASSANDRA-6136) + * Use Java7 file-handling APIs and fix file moving on Windows (CASSANDRA-5383) + * Save compaction history to system keyspace (CASSANDRA-5078) + * Fix NPE if StorageService.getOperationMode() is executed before full startup (CASSANDRA-6166) + * CQL3: support pre-epoch longs for TimestampType (CASSANDRA-6212) + * Add reloadtriggers command to nodetool (CASSANDRA-4949) + * cqlsh: ignore empty 'value alias' in DESCRIBE (CASSANDRA-6139) + * Fix sstable loader (CASSANDRA-6205) +Merged from 1.2: + * (Hadoop) Require CFRR batchSize to be at least 2 (CASSANDRA-6114) * Add a warning for small LCS sstable size (CASSANDRA-6191) * Add ability to list specific KS/CF combinations in nodetool cfstats (CASSANDRA-4191) - * Mark CF clean if a mutation raced the drop and got it marked dirty + * Mark CF clean if a mutation raced the drop and got it marked dirty (CASSANDRA-5946) * Add a LOCAL_ONE consistency level (CASSANDRA-6202) * Limit CQL prepared statement cache by size instead of count (CASSANDRA-6107) * Tracing should log write failure rather than raw exceptions (CASSANDRA-6133)
[3/6] git commit: add ticket number to changes
add ticket number to changes Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/df778818 Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/df778818 Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/df778818 Branch: refs/heads/trunk Commit: df778818bf9eb5c3fe88f5787c6018c73f5c8a9b Parents: 59bf44d Author: Brandon Williams brandonwilli...@apache.org Authored: Tue Oct 22 16:58:20 2013 -0500 Committer: Brandon Williams brandonwilli...@apache.org Committed: Tue Oct 22 16:58:20 2013 -0500 -- CHANGES.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) -- http://git-wip-us.apache.org/repos/asf/cassandra/blob/df778818/CHANGES.txt -- diff --git a/CHANGES.txt b/CHANGES.txt index 4d07d78..b5f83b7 100644 --- a/CHANGES.txt +++ b/CHANGES.txt @@ -7,7 +7,7 @@ 1.2.11 * Add a warning for small LCS sstable size (CASSANDRA-6191) * Add ability to list specific KS/CF combinations in nodetool cfstats (CASSANDRA-4191) - * Mark CF clean if a mutation raced the drop and got it marked dirty + * Mark CF clean if a mutation raced the drop and got it marked dirty (CASSANDRA-5946) * Add a LOCAL_ONE consistency level (CASSANDRA-6202) * Limit CQL prepared statement cache by size instead of count (CASSANDRA-6107) * Tracing should log write failure rather than raw exceptions (CASSANDRA-6133)
[1/6] git commit: add ticket number to changes
Updated Branches: refs/heads/cassandra-1.2 59bf44dd9 - df778818b refs/heads/cassandra-2.0 2629e5e4e - bee0b4b9d refs/heads/trunk 6cb5da88a - aca45fe99 add ticket number to changes Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/df778818 Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/df778818 Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/df778818 Branch: refs/heads/cassandra-1.2 Commit: df778818bf9eb5c3fe88f5787c6018c73f5c8a9b Parents: 59bf44d Author: Brandon Williams brandonwilli...@apache.org Authored: Tue Oct 22 16:58:20 2013 -0500 Committer: Brandon Williams brandonwilli...@apache.org Committed: Tue Oct 22 16:58:20 2013 -0500 -- CHANGES.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) -- http://git-wip-us.apache.org/repos/asf/cassandra/blob/df778818/CHANGES.txt -- diff --git a/CHANGES.txt b/CHANGES.txt index 4d07d78..b5f83b7 100644 --- a/CHANGES.txt +++ b/CHANGES.txt @@ -7,7 +7,7 @@ 1.2.11 * Add a warning for small LCS sstable size (CASSANDRA-6191) * Add ability to list specific KS/CF combinations in nodetool cfstats (CASSANDRA-4191) - * Mark CF clean if a mutation raced the drop and got it marked dirty + * Mark CF clean if a mutation raced the drop and got it marked dirty (CASSANDRA-5946) * Add a LOCAL_ONE consistency level (CASSANDRA-6202) * Limit CQL prepared statement cache by size instead of count (CASSANDRA-6107) * Tracing should log write failure rather than raw exceptions (CASSANDRA-6133)
[4/6] git commit: Merge branch 'cassandra-1.2' into cassandra-2.0
Merge branch 'cassandra-1.2' into cassandra-2.0 Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/bee0b4b9 Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/bee0b4b9 Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/bee0b4b9 Branch: refs/heads/trunk Commit: bee0b4b9df0b633580ba5e22671423967f4f2244 Parents: 2629e5e df77881 Author: Brandon Williams brandonwilli...@apache.org Authored: Tue Oct 22 16:58:27 2013 -0500 Committer: Brandon Williams brandonwilli...@apache.org Committed: Tue Oct 22 16:58:27 2013 -0500 -- CHANGES.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) -- http://git-wip-us.apache.org/repos/asf/cassandra/blob/bee0b4b9/CHANGES.txt -- diff --cc CHANGES.txt index c01b4a8,b5f83b7..5abb16b --- a/CHANGES.txt +++ b/CHANGES.txt @@@ -1,41 -1,13 +1,41 @@@ -1.2.12 - * (Hadoop) Require CFRR batchSize to be at least 2 (CASSANDRA-6114) - * Fix altering column types (CASSANDRA-6185) - * cqlsh: fix CREATE/ALTER WITH completion (CASSANDRA-6196) +2.0.3 + * Reject bootstrapping if the node already exists in gossip (CASSANDRA-5571) -1.2.11 +2.0.2 + * Update FailureDetector to use nanontime (CASSANDRA-4925) + * Fix FileCacheService regressions (CASSANDRA-6149) + * Never return WriteTimeout for CL.ANY (CASSANDRA-6032) + * Fix race conditions in bulk loader (CASSANDRA-6129) + * Add configurable metrics reporting (CASSANDRA-4430) + * drop queries exceeding a configurable number of tombstones (CASSANDRA-6117) + * Track and persist sstable read activity (CASSANDRA-5515) + * Fixes for speculative retry (CASSANDRA-5932) + * Improve memory usage of metadata min/max column names (CASSANDRA-6077) + * Fix thrift validation refusing row markers on CQL3 tables (CASSANDRA-6081) + * Fix insertion of collections with CAS (CASSANDRA-6069) + * Correctly send metadata on SELECT COUNT (CASSANDRA-6080) + * Track clients' remote addresses in ClientState (CASSANDRA-6070) + * Create snapshot 
dir if it does not exist when migrating + leveled manifest (CASSANDRA-6093) + * make sequential nodetool repair the default (CASSANDRA-5950) + * Add more hooks for compaction strategy implementations (CASSANDRA-6111) + * Fix potential NPE on composite 2ndary indexes (CASSANDRA-6098) + * Delete can potentially be skipped in batch (CASSANDRA-6115) + * Allow alter keyspace on system_traces (CASSANDRA-6016) + * Disallow empty column names in cql (CASSANDRA-6136) + * Use Java7 file-handling APIs and fix file moving on Windows (CASSANDRA-5383) + * Save compaction history to system keyspace (CASSANDRA-5078) + * Fix NPE if StorageService.getOperationMode() is executed before full startup (CASSANDRA-6166) + * CQL3: support pre-epoch longs for TimestampType (CASSANDRA-6212) + * Add reloadtriggers command to nodetool (CASSANDRA-4949) + * cqlsh: ignore empty 'value alias' in DESCRIBE (CASSANDRA-6139) + * Fix sstable loader (CASSANDRA-6205) +Merged from 1.2: + * (Hadoop) Require CFRR batchSize to be at least 2 (CASSANDRA-6114) * Add a warning for small LCS sstable size (CASSANDRA-6191) * Add ability to list specific KS/CF combinations in nodetool cfstats (CASSANDRA-4191) - * Mark CF clean if a mutation raced the drop and got it marked dirty + * Mark CF clean if a mutation raced the drop and got it marked dirty (CASSANDRA-5946) * Add a LOCAL_ONE consistency level (CASSANDRA-6202) * Limit CQL prepared statement cache by size instead of count (CASSANDRA-6107) * Tracing should log write failure rather than raw exceptions (CASSANDRA-6133)
[2/6] git commit: add ticket number to changes
add ticket number to changes Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/df778818 Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/df778818 Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/df778818 Branch: refs/heads/cassandra-2.0 Commit: df778818bf9eb5c3fe88f5787c6018c73f5c8a9b Parents: 59bf44d Author: Brandon Williams brandonwilli...@apache.org Authored: Tue Oct 22 16:58:20 2013 -0500 Committer: Brandon Williams brandonwilli...@apache.org Committed: Tue Oct 22 16:58:20 2013 -0500 -- CHANGES.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) -- http://git-wip-us.apache.org/repos/asf/cassandra/blob/df778818/CHANGES.txt -- diff --git a/CHANGES.txt b/CHANGES.txt index 4d07d78..b5f83b7 100644 --- a/CHANGES.txt +++ b/CHANGES.txt @@ -7,7 +7,7 @@ 1.2.11 * Add a warning for small LCS sstable size (CASSANDRA-6191) * Add ability to list specific KS/CF combinations in nodetool cfstats (CASSANDRA-4191) - * Mark CF clean if a mutation raced the drop and got it marked dirty + * Mark CF clean if a mutation raced the drop and got it marked dirty (CASSANDRA-5946) * Add a LOCAL_ONE consistency level (CASSANDRA-6202) * Limit CQL prepared statement cache by size instead of count (CASSANDRA-6107) * Tracing should log write failure rather than raw exceptions (CASSANDRA-6133)
[4/6] git commit: Merge branch 'cassandra-1.2' into cassandra-2.0
Merge branch 'cassandra-1.2' into cassandra-2.0 Conflicts: CHANGES.txt Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/ebd50ff2 Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/ebd50ff2 Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/ebd50ff2 Branch: refs/heads/trunk Commit: ebd50ff2a95b6a1a9a48a25c0509f07d0fba8cd5 Parents: bee0b4b dbca6d6 Author: Brandon Williams brandonwilli...@apache.org Authored: Tue Oct 22 17:08:54 2013 -0500 Committer: Brandon Williams brandonwilli...@apache.org Committed: Tue Oct 22 17:08:54 2013 -0500 -- CHANGES.txt | 20 ++-- 1 file changed, 14 insertions(+), 6 deletions(-) -- http://git-wip-us.apache.org/repos/asf/cassandra/blob/ebd50ff2/CHANGES.txt -- diff --cc CHANGES.txt index 5abb16b,9e150b8..d60cff1 --- a/CHANGES.txt +++ b/CHANGES.txt @@@ -227,7 -114,17 +227,15 @@@ Merged from 1.2 * (Hadoop) quote identifiers in CqlPagingRecordReader (CASSANDRA-5763) * Add replace_node functionality for vnodes (CASSANDRA-5337) * Add timeout events to query traces (CASSANDRA-5520) - * make starting native protocol server idempotent (CASSANDRA-5728) - * Fix loading key cache when a saved entry is no longer valid (CASSANDRA-5706) * Fix serialization of the LEFT gossip value (CASSANDRA-5696) + * Pig: support for cql3 tables (CASSANDRA-5234) + * cqlsh: Don't show 'null' in place of empty values (CASSANDRA-5675) + * Race condition in detecting version on a mixed 1.1/1.2 cluster +(CASSANDRA-5692) + * Fix skipping range tombstones with reverse queries (CASSANDRA-5712) + * Expire entries out of ThriftSessionManager (CASSANDRA-5719) + * Don't keep ancestor information in memory (CASSANDRA-5342) + * cqlsh: fix handling of semicolons inside BATCH queries (CASSANDRA-5697) * Expose native protocol server status in nodetool info (CASSANDRA-5735) * Fix pathetic performance of range tombstones (CASSANDRA-5677) * Fix querying with an empty (impossible) range 
(CASSANDRA-5573)
[6/6] git commit: Merge branch 'cassandra-2.0' into trunk
Merge branch 'cassandra-2.0' into trunk Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/d963e9b2 Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/d963e9b2 Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/d963e9b2 Branch: refs/heads/trunk Commit: d963e9b29917322a1408b77627e20734ef81fbb5 Parents: aca45fe ebd50ff Author: Brandon Williams brandonwilli...@apache.org Authored: Tue Oct 22 17:09:02 2013 -0500 Committer: Brandon Williams brandonwilli...@apache.org Committed: Tue Oct 22 17:09:02 2013 -0500 -- CHANGES.txt | 20 ++-- 1 file changed, 14 insertions(+), 6 deletions(-) -- http://git-wip-us.apache.org/repos/asf/cassandra/blob/d963e9b2/CHANGES.txt --
[1/6] git commit: fix a bunch of typos
Updated Branches: refs/heads/cassandra-1.2 df778818b - dbca6d62f refs/heads/cassandra-2.0 bee0b4b9d - ebd50ff2a refs/heads/trunk aca45fe99 - d963e9b29 fix a bunch of typos Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/dbca6d62 Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/dbca6d62 Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/dbca6d62 Branch: refs/heads/cassandra-1.2 Commit: dbca6d62f1fa40f6aca9db8e443ef093d22d1ffb Parents: df77881 Author: Brandon Williams brandonwilli...@apache.org Authored: Tue Oct 22 17:05:24 2013 -0500 Committer: Brandon Williams brandonwilli...@apache.org Committed: Tue Oct 22 17:05:24 2013 -0500 -- CHANGES.txt | 14 +++--- 1 file changed, 7 insertions(+), 7 deletions(-) -- http://git-wip-us.apache.org/repos/asf/cassandra/blob/dbca6d62/CHANGES.txt -- diff --git a/CHANGES.txt b/CHANGES.txt index b5f83b7..9e150b8 100644 --- a/CHANGES.txt +++ b/CHANGES.txt @@ -122,7 +122,7 @@ Merged from 1.1: * Race condition in detecting version on a mixed 1.1/1.2 cluster (CASSANDRA-5692) * Fix skipping range tombstones with reverse queries (CASSANDRA-5712) - * Expire entries out of ThriftSessionManager (CASSANRDA-5719) + * Expire entries out of ThriftSessionManager (CASSANDRA-5719) * Don't keep ancestor information in memory (CASSANDRA-5342) * cqlsh: fix handling of semicolons inside BATCH queries (CASSANDRA-5697) * Expose native protocol server status in nodetool info (CASSANDRA-5735) @@ -353,7 +353,7 @@ Merged from 1.1: * drain should flush system CFs too (CASSANDRA-4446) * add inter_dc_tcp_nodelay setting (CASSANDRA-5148) * re-allow wrapping ranges for start_token/end_token range pairitspwng (CASSANDRA-5106) - * fix validation compaction of empty rows (CASSADRA-5136) + * fix validation compaction of empty rows (CASSANDRA-5136) * nodetool methods to enable/disable hint storage/delivery (CASSANDRA-4750) * disallow bloom filter false positive chance of 0 
(CASSANDRA-5013) * add threadpool size adjustment methods to JMXEnabledThreadPoolExecutor and @@ -539,7 +539,7 @@ Merged from 1.1: * Add support for batchlog in CQL3 (CASSANDRA-4545, 4738) * Add support for multiple column family outputs in CFOF (CASSANDRA-4208) * Support repairing only the local DC nodes (CASSANDRA-4747) - * Use rpc_address for binary protocol and change default port (CASSANRA-4751) + * Use rpc_address for binary protocol and change default port (CASSANDRA-4751) * Fix use of collections in prepared statements (CASSANDRA-4739) * Store more information into peers table (CASSANDRA-4351, 4814) * Configurable bucket size for size tiered compaction (CASSANDRA-4704) @@ -1233,7 +1233,7 @@ Merged from 0.8: * fix incorrect query results due to invalid max timestamp (CASSANDRA-3510) * make sstableloader recognize compressed sstables (CASSANDRA-3521) * avoids race in OutboundTcpConnection in multi-DC setups (CASSANDRA-3530) - * use SETLOCAL in cassandra.bat (CASANDRA-3506) + * use SETLOCAL in cassandra.bat (CASSANDRA-3506) * fix ConcurrentModificationException in Table.all() (CASSANDRA-3529) Merged from 0.8: * fix concurrence issue in the FailureDetector (CASSANDRA-3519) @@ -1276,7 +1276,7 @@ Merged from 0.8: * defragment rows for name-based queries under STCS (CASSANDRA-2503) * Add timing information to cassandra-cli GET/SET/LIST queries (CASSANDRA-3326) * Only create one CompressionMetadata object per sstable (CASSANDRA-3427) - * cleanup usage of StorageService.setMode() (CASANDRA-3388) + * cleanup usage of StorageService.setMode() (CASSANDRA-3388) * Avoid large array allocation for compressed chunk offsets (CASSANDRA-3432) * fix DecimalType bytebuffer marshalling (CASSANDRA-3421) * fix bug that caused first column in per row indexes to be ignored @@ -1581,7 +1581,7 @@ Merged from 0.8: * Fix divide by zero error in GCInspector (CASSANDRA-3164) * allow quoting of the ColumnFamily name in CLI `create column family` statement (CASSANDRA-3195) - * Fix rolling 
upgrade from 0.7 to 0.8 problem (CASANDRA-3166) + * Fix rolling upgrade from 0.7 to 0.8 problem (CASSANDRA-3166) * Accomodate missing encryption_options in IncomingTcpConnection.stream (CASSANDRA-3212) @@ -1885,7 +1885,7 @@ Merged from 0.8: * disallow making schema changes to system keyspace (CASSANDRA-2563) * fix sending mutation messages multiple times (CASSANDRA-2557) * fix incorrect use of NBHM.size in ReadCallback that could cause - reads to time out even when responses were received (CASSAMDRA-2552) + reads to time out even when responses were received (CASSANDRA-2552) * trigger read repair correctly for LOCAL_QUORUM reads (CASSANDRA-2556) * Allow configuring the
[5/6] git commit: Merge branch 'cassandra-1.2' into cassandra-2.0
Merge branch 'cassandra-1.2' into cassandra-2.0 Conflicts: CHANGES.txt Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/ebd50ff2 Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/ebd50ff2 Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/ebd50ff2 Branch: refs/heads/cassandra-2.0 Commit: ebd50ff2a95b6a1a9a48a25c0509f07d0fba8cd5 Parents: bee0b4b dbca6d6 Author: Brandon Williams brandonwilli...@apache.org Authored: Tue Oct 22 17:08:54 2013 -0500 Committer: Brandon Williams brandonwilli...@apache.org Committed: Tue Oct 22 17:08:54 2013 -0500 -- CHANGES.txt | 20 ++-- 1 file changed, 14 insertions(+), 6 deletions(-) -- http://git-wip-us.apache.org/repos/asf/cassandra/blob/ebd50ff2/CHANGES.txt -- diff --cc CHANGES.txt index 5abb16b,9e150b8..d60cff1 --- a/CHANGES.txt +++ b/CHANGES.txt @@@ -227,7 -114,17 +227,15 @@@ Merged from 1.2 * (Hadoop) quote identifiers in CqlPagingRecordReader (CASSANDRA-5763) * Add replace_node functionality for vnodes (CASSANDRA-5337) * Add timeout events to query traces (CASSANDRA-5520) - * make starting native protocol server idempotent (CASSANDRA-5728) - * Fix loading key cache when a saved entry is no longer valid (CASSANDRA-5706) * Fix serialization of the LEFT gossip value (CASSANDRA-5696) + * Pig: support for cql3 tables (CASSANDRA-5234) + * cqlsh: Don't show 'null' in place of empty values (CASSANDRA-5675) + * Race condition in detecting version on a mixed 1.1/1.2 cluster +(CASSANDRA-5692) + * Fix skipping range tombstones with reverse queries (CASSANDRA-5712) + * Expire entries out of ThriftSessionManager (CASSANDRA-5719) + * Don't keep ancestor information in memory (CASSANDRA-5342) + * cqlsh: fix handling of semicolons inside BATCH queries (CASSANDRA-5697) * Expose native protocol server status in nodetool info (CASSANDRA-5735) * Fix pathetic performance of range tombstones (CASSANDRA-5677) * Fix querying with an empty (impossible) range 
(CASSANDRA-5573)
[2/6] git commit: fix a bunch of typos
fix a bunch of typos Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/dbca6d62 Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/dbca6d62 Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/dbca6d62 Branch: refs/heads/cassandra-2.0 Commit: dbca6d62f1fa40f6aca9db8e443ef093d22d1ffb Parents: df77881 Author: Brandon Williams brandonwilli...@apache.org Authored: Tue Oct 22 17:05:24 2013 -0500 Committer: Brandon Williams brandonwilli...@apache.org Committed: Tue Oct 22 17:05:24 2013 -0500 -- CHANGES.txt | 14 +++--- 1 file changed, 7 insertions(+), 7 deletions(-) -- http://git-wip-us.apache.org/repos/asf/cassandra/blob/dbca6d62/CHANGES.txt -- diff --git a/CHANGES.txt b/CHANGES.txt index b5f83b7..9e150b8 100644 --- a/CHANGES.txt +++ b/CHANGES.txt @@ -122,7 +122,7 @@ Merged from 1.1: * Race condition in detecting version on a mixed 1.1/1.2 cluster (CASSANDRA-5692) * Fix skipping range tombstones with reverse queries (CASSANDRA-5712) - * Expire entries out of ThriftSessionManager (CASSANRDA-5719) + * Expire entries out of ThriftSessionManager (CASSANDRA-5719) * Don't keep ancestor information in memory (CASSANDRA-5342) * cqlsh: fix handling of semicolons inside BATCH queries (CASSANDRA-5697) * Expose native protocol server status in nodetool info (CASSANDRA-5735) @@ -353,7 +353,7 @@ Merged from 1.1: * drain should flush system CFs too (CASSANDRA-4446) * add inter_dc_tcp_nodelay setting (CASSANDRA-5148) * re-allow wrapping ranges for start_token/end_token range pairitspwng (CASSANDRA-5106) - * fix validation compaction of empty rows (CASSADRA-5136) + * fix validation compaction of empty rows (CASSANDRA-5136) * nodetool methods to enable/disable hint storage/delivery (CASSANDRA-4750) * disallow bloom filter false positive chance of 0 (CASSANDRA-5013) * add threadpool size adjustment methods to JMXEnabledThreadPoolExecutor and @@ -539,7 +539,7 @@ Merged from 1.1: * Add support for 
batchlog in CQL3 (CASSANDRA-4545, 4738) * Add support for multiple column family outputs in CFOF (CASSANDRA-4208) * Support repairing only the local DC nodes (CASSANDRA-4747) - * Use rpc_address for binary protocol and change default port (CASSANRA-4751) + * Use rpc_address for binary protocol and change default port (CASSANDRA-4751) * Fix use of collections in prepared statements (CASSANDRA-4739) * Store more information into peers table (CASSANDRA-4351, 4814) * Configurable bucket size for size tiered compaction (CASSANDRA-4704) @@ -1233,7 +1233,7 @@ Merged from 0.8: * fix incorrect query results due to invalid max timestamp (CASSANDRA-3510) * make sstableloader recognize compressed sstables (CASSANDRA-3521) * avoids race in OutboundTcpConnection in multi-DC setups (CASSANDRA-3530) - * use SETLOCAL in cassandra.bat (CASANDRA-3506) + * use SETLOCAL in cassandra.bat (CASSANDRA-3506) * fix ConcurrentModificationException in Table.all() (CASSANDRA-3529) Merged from 0.8: * fix concurrence issue in the FailureDetector (CASSANDRA-3519) @@ -1276,7 +1276,7 @@ Merged from 0.8: * defragment rows for name-based queries under STCS (CASSANDRA-2503) * Add timing information to cassandra-cli GET/SET/LIST queries (CASSANDRA-3326) * Only create one CompressionMetadata object per sstable (CASSANDRA-3427) - * cleanup usage of StorageService.setMode() (CASANDRA-3388) + * cleanup usage of StorageService.setMode() (CASSANDRA-3388) * Avoid large array allocation for compressed chunk offsets (CASSANDRA-3432) * fix DecimalType bytebuffer marshalling (CASSANDRA-3421) * fix bug that caused first column in per row indexes to be ignored @@ -1581,7 +1581,7 @@ Merged from 0.8: * Fix divide by zero error in GCInspector (CASSANDRA-3164) * allow quoting of the ColumnFamily name in CLI `create column family` statement (CASSANDRA-3195) - * Fix rolling upgrade from 0.7 to 0.8 problem (CASANDRA-3166) + * Fix rolling upgrade from 0.7 to 0.8 problem (CASSANDRA-3166) * Accomodate missing 
encryption_options in IncomingTcpConnection.stream (CASSANDRA-3212) @@ -1885,7 +1885,7 @@ Merged from 0.8: * disallow making schema changes to system keyspace (CASSANDRA-2563) * fix sending mutation messages multiple times (CASSANDRA-2557) * fix incorrect use of NBHM.size in ReadCallback that could cause - reads to time out even when responses were received (CASSAMDRA-2552) + reads to time out even when responses were received (CASSANDRA-2552) * trigger read repair correctly for LOCAL_QUORUM reads (CASSANDRA-2556) * Allow configuring the number of compaction thread (CASSANDRA-2558) * forceUserDefinedCompaction will attempt to compact what it is given
[3/6] git commit: fix a bunch of typos
fix a bunch of typos Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/dbca6d62 Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/dbca6d62 Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/dbca6d62 Branch: refs/heads/trunk Commit: dbca6d62f1fa40f6aca9db8e443ef093d22d1ffb Parents: df77881 Author: Brandon Williams brandonwilli...@apache.org Authored: Tue Oct 22 17:05:24 2013 -0500 Committer: Brandon Williams brandonwilli...@apache.org Committed: Tue Oct 22 17:05:24 2013 -0500 -- CHANGES.txt | 14 +++--- 1 file changed, 7 insertions(+), 7 deletions(-) -- http://git-wip-us.apache.org/repos/asf/cassandra/blob/dbca6d62/CHANGES.txt -- diff --git a/CHANGES.txt b/CHANGES.txt index b5f83b7..9e150b8 100644 --- a/CHANGES.txt +++ b/CHANGES.txt @@ -122,7 +122,7 @@ Merged from 1.1: * Race condition in detecting version on a mixed 1.1/1.2 cluster (CASSANDRA-5692) * Fix skipping range tombstones with reverse queries (CASSANDRA-5712) - * Expire entries out of ThriftSessionManager (CASSANRDA-5719) + * Expire entries out of ThriftSessionManager (CASSANDRA-5719) * Don't keep ancestor information in memory (CASSANDRA-5342) * cqlsh: fix handling of semicolons inside BATCH queries (CASSANDRA-5697) * Expose native protocol server status in nodetool info (CASSANDRA-5735) @@ -353,7 +353,7 @@ Merged from 1.1: * drain should flush system CFs too (CASSANDRA-4446) * add inter_dc_tcp_nodelay setting (CASSANDRA-5148) * re-allow wrapping ranges for start_token/end_token range pairitspwng (CASSANDRA-5106) - * fix validation compaction of empty rows (CASSADRA-5136) + * fix validation compaction of empty rows (CASSANDRA-5136) * nodetool methods to enable/disable hint storage/delivery (CASSANDRA-4750) * disallow bloom filter false positive chance of 0 (CASSANDRA-5013) * add threadpool size adjustment methods to JMXEnabledThreadPoolExecutor and @@ -539,7 +539,7 @@ Merged from 1.1: * Add support for batchlog 
in CQL3 (CASSANDRA-4545, 4738) * Add support for multiple column family outputs in CFOF (CASSANDRA-4208) * Support repairing only the local DC nodes (CASSANDRA-4747) - * Use rpc_address for binary protocol and change default port (CASSANRA-4751) + * Use rpc_address for binary protocol and change default port (CASSANDRA-4751) * Fix use of collections in prepared statements (CASSANDRA-4739) * Store more information into peers table (CASSANDRA-4351, 4814) * Configurable bucket size for size tiered compaction (CASSANDRA-4704) @@ -1233,7 +1233,7 @@ Merged from 0.8: * fix incorrect query results due to invalid max timestamp (CASSANDRA-3510) * make sstableloader recognize compressed sstables (CASSANDRA-3521) * avoids race in OutboundTcpConnection in multi-DC setups (CASSANDRA-3530) - * use SETLOCAL in cassandra.bat (CASANDRA-3506) + * use SETLOCAL in cassandra.bat (CASSANDRA-3506) * fix ConcurrentModificationException in Table.all() (CASSANDRA-3529) Merged from 0.8: * fix concurrence issue in the FailureDetector (CASSANDRA-3519) @@ -1276,7 +1276,7 @@ Merged from 0.8: * defragment rows for name-based queries under STCS (CASSANDRA-2503) * Add timing information to cassandra-cli GET/SET/LIST queries (CASSANDRA-3326) * Only create one CompressionMetadata object per sstable (CASSANDRA-3427) - * cleanup usage of StorageService.setMode() (CASANDRA-3388) + * cleanup usage of StorageService.setMode() (CASSANDRA-3388) * Avoid large array allocation for compressed chunk offsets (CASSANDRA-3432) * fix DecimalType bytebuffer marshalling (CASSANDRA-3421) * fix bug that caused first column in per row indexes to be ignored @@ -1581,7 +1581,7 @@ Merged from 0.8: * Fix divide by zero error in GCInspector (CASSANDRA-3164) * allow quoting of the ColumnFamily name in CLI `create column family` statement (CASSANDRA-3195) - * Fix rolling upgrade from 0.7 to 0.8 problem (CASANDRA-3166) + * Fix rolling upgrade from 0.7 to 0.8 problem (CASSANDRA-3166) * Accomodate missing encryption_options in 
IncomingTcpConnection.stream (CASSANDRA-3212) @@ -1885,7 +1885,7 @@ Merged from 0.8: * disallow making schema changes to system keyspace (CASSANDRA-2563) * fix sending mutation messages multiple times (CASSANDRA-2557) * fix incorrect use of NBHM.size in ReadCallback that could cause - reads to time out even when responses were received (CASSAMDRA-2552) + reads to time out even when responses were received (CASSANDRA-2552) * trigger read repair correctly for LOCAL_QUORUM reads (CASSANDRA-2556) * Allow configuring the number of compaction thread (CASSANDRA-2558) * forceUserDefinedCompaction will attempt to compact what it is given
[jira] [Updated] (CASSANDRA-6228) Add view trace session to cqlsh
[ https://issues.apache.org/jira/browse/CASSANDRA-6228?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Mikhail Stepura updated CASSANDRA-6228: --- Attachment: cassandra-2.0-6228.patch implemented {{SHOW SESSION uuid}} command Add view trace session to cqlsh - Key: CASSANDRA-6228 URL: https://issues.apache.org/jira/browse/CASSANDRA-6228 Project: Cassandra Issue Type: Improvement Components: Tools Reporter: Jeremiah Jordan Assignee: Mikhail Stepura Priority: Trivial Attachments: cassandra-2.0-6228.patch It would be nice if cqlsh had a command to pass a tracing session id in, and have it print out the trace the same way it does when tracing is on. -- This message was sent by Atlassian JIRA (v6.1#6144)
[jira] [Updated] (CASSANDRA-5519) Reduce index summary memory use for cold sstables
[ https://issues.apache.org/jira/browse/CASSANDRA-5519?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Tyler Hobbs updated CASSANDRA-5519: --- Attachment: downsample.py The attached downsample.py script demonstrates the downsampling algorithm. It's a touch complex, but it would be easy to precompute or cache the downsampling patterns if needed. An example run with an original index summary size of 16 and a resolution of 8, meaning each minimal downsample run will remove 1/8th of the original points. The top row is the original index summary and each row below that represents one downsampling run: {noformat} ~ $ ./downsample.py 16 8 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 1 2 3 4 5 6 7 9 10 11 12 13 14 15 1 2 3 5 6 7 9 10 11 13 14 15 1 3 5 6 7 9 11 13 14 15 1 3 5 7 9 11 13 15 3 5 7 11 13 15 3 7 11 15 {noformat} Reduce index summary memory use for cold sstables - Key: CASSANDRA-5519 URL: https://issues.apache.org/jira/browse/CASSANDRA-5519 Project: Cassandra Issue Type: Bug Components: Core Reporter: Jonathan Ellis Assignee: Tyler Hobbs Priority: Minor Fix For: 2.1 Attachments: downsample.py -- This message was sent by Atlassian JIRA (v6.1#6144)
[jira] [Created] (CASSANDRA-6230) Write hints to a file instead of a table
Jonathan Ellis created CASSANDRA-6230: - Summary: Write hints to a file instead of a table Key: CASSANDRA-6230 URL: https://issues.apache.org/jira/browse/CASSANDRA-6230 Project: Cassandra Issue Type: Bug Components: Core Reporter: Jonathan Ellis Assignee: Oleg Anastasyev Priority: Minor Fix For: 2.1 Writing to a file would have less overhead on both hint creation and replay. -- This message was sent by Atlassian JIRA (v6.1#6144)
[jira] [Updated] (CASSANDRA-5902) Dealing with hints after a topology change
[ https://issues.apache.org/jira/browse/CASSANDRA-5902?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Jonathan Ellis updated CASSANDRA-5902: -- Assignee: (was: Jonathan Ellis) Does this qualify as LHF [~brandon.williams]? If we have a hook for the topology change I think that's the only hard part. Dealing with hints after a topology change -- Key: CASSANDRA-5902 URL: https://issues.apache.org/jira/browse/CASSANDRA-5902 Project: Cassandra Issue Type: Bug Reporter: Jonathan Ellis Priority: Minor Hints are stored and delivered by destination node id. This allows them to survive IP changes in the target, while making "scan all the hints for a given destination" an efficient operation. However, we do not detect and handle a new node assuming responsibility for the hinted row via bootstrap before it can be delivered. I think we have to take a performance hit in this case -- we need to deliver such a hint to *all* replicas, since we don't know which is the new one. This happens infrequently enough, however -- requiring first the target node to be down to create the hint, then the hint owner to be down long enough for the target to both recover and stream to a new node -- that this should be okay. -- This message was sent by Atlassian JIRA (v6.1#6144)
[jira] [Commented] (CASSANDRA-5883) Switch to Logback
[ https://issues.apache.org/jira/browse/CASSANDRA-5883?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanelfocusedCommentId=13802400#comment-13802400 ] Jonathan Ellis commented on CASSANDRA-5883: --- We don't log *that* much so speed is kind of a non-issue for me. Does log4j2 buy us anything else? I'm definitely a fan of logback's auto-reload of config. Switch to Logback - Key: CASSANDRA-5883 URL: https://issues.apache.org/jira/browse/CASSANDRA-5883 Project: Cassandra Issue Type: Improvement Components: Core, Tools Reporter: Jonathan Ellis Assignee: Dave Brosius Priority: Minor Fix For: 2.1 Attachments: 0001-Additional-migration-to-logback.patch, 5883-1.txt, 5883-additional1.txt, 5883.txt Logback has a number of advantages over log4j, and switching will be straightforward since we are already using the slf4j translation layer: http://logback.qos.ch/reasonsToSwitch.html -- This message was sent by Atlassian JIRA (v6.1#6144)
[jira] [Commented] (CASSANDRA-5902) Dealing with hints after a topology change
[ https://issues.apache.org/jira/browse/CASSANDRA-5902?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanelfocusedCommentId=13802401#comment-13802401 ] Brandon Williams commented on CASSANDRA-5902: - Maybe a bit more than LHF imo, but I guess it won't hurt to try. Dealing with hints after a topology change -- Key: CASSANDRA-5902 URL: https://issues.apache.org/jira/browse/CASSANDRA-5902 Project: Cassandra Issue Type: Bug Reporter: Jonathan Ellis Priority: Minor Hints are stored and delivered by destination node id. This allows them to survive IP changes in the target, while making "scan all the hints for a given destination" an efficient operation. However, we do not detect and handle a new node assuming responsibility for the hinted row via bootstrap before it can be delivered. I think we have to take a performance hit in this case -- we need to deliver such a hint to *all* replicas, since we don't know which is the new one. This happens infrequently enough, however -- requiring first the target node to be down to create the hint, then the hint owner to be down long enough for the target to both recover and stream to a new node -- that this should be okay. -- This message was sent by Atlassian JIRA (v6.1#6144)
[jira] [Resolved] (CASSANDRA-5777) Crash if internode compression is on
[ https://issues.apache.org/jira/browse/CASSANDRA-5777?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Jonathan Ellis resolved CASSANDRA-5777. --- Resolution: Invalid Crash if internode compression is on - Key: CASSANDRA-5777 URL: https://issues.apache.org/jira/browse/CASSANDRA-5777 Project: Cassandra Issue Type: Bug Affects Versions: 2.0 beta 1 Environment: Raspberry Pi, Oracle jdk 1.8 Reporter: Andy Cobley Priority: Minor 2 node Raspberry Pi system. Node one is started fine. Try and start node 2 making sure that both nodes have internnode compression turned on: internode_compression: all When node 2 starts there is a crash. Trace is: {noformat} DEBUG 10:45:08,432 adding /var/lib/cassandra/data/system/local/system-local-ja-2 to list of files tracked for system.local DEBUG 10:45:08,438 Scheduling a background task check for system.local with SizeTieredCompactionStrategy DEBUG 10:45:08,443 Checking system.local DEBUG 10:45:08,448 Compaction buckets are [[SSTableReader(path='/var/lib/cassandra/data/system/local/system-local-ja-1-Data.db'), SSTableReader(path='/var/lib/cassandra/data/system/local/system-local-ja-2-Data.db')]] DEBUG 10:45:08,452 discard completed log segments for ReplayPosition(segmentId=1374230697429, position=81283), column family 7ad54392-bcdd-35a6-8417-4e047860b377 DEBUG 10:45:08,457 Not deleting active commitlog segment CommitLogSegment(/var/lib/cassandra/commitlog/CommitLog-3-1374230697429.log) DEBUG 10:45:08,462 No tasks available DEBUG 10:45:09,028 Gossiping my schema version 59adb24e-f3cd-3e02-97f0-5b395827453f INFO 10:45:09,099 Starting Messaging Service on port 7000 DEBUG 10:45:09,629 Created HHOM instance, registered MBean. 
DEBUG 10:45:09,803 attempting to connect to /192.168.0.10 DEBUG 10:45:09,812 Bootstrap variables: true false false false INFO 10:45:09,909 Enqueuing flush of Memtable-local@12523409(86/86 serialized/live bytes, 4 ops) INFO 10:45:09,916 Writing Memtable-local@12523409(86/86 serialized/live bytes, 4 ops) INFO 10:45:09,932 Handshaking version with /192.168.0.10 DEBUG 10:45:10,664 Renaming /var/lib/cassandra/data/system/local/system-local-tmp-ja-3-Index.db to /var/lib/cassandra/data/system/local/system-local-ja-3-Index.db DEBUG 10:45:10,709 Renaming /var/lib/cassandra/data/system/local/system-local-tmp-ja-3-TOC.txt to /var/lib/cassandra/data/system/local/system-local-ja-3-TOC.txt DEBUG 10:45:10,723 Renaming /var/lib/cassandra/data/system/local/system-local-tmp-ja-3-CompressionInfo.db to /var/lib/cassandra/data/system/local/system-local-ja-3-CompressionInfo.db DEBUG 10:45:10,733 Renaming /var/lib/cassandra/data/system/local/system-local-tmp-ja-3-Statistics.db to /var/lib/cassandra/data/system/local/system-local-ja-3-Statistics.db DEBUG 10:45:10,743 Renaming /var/lib/cassandra/data/system/local/system-local-tmp-ja-3-Filter.db to /var/lib/cassandra/data/system/local/system-local-ja-3-Filter.db DEBUG 10:45:10,751 Renaming /var/lib/cassandra/data/system/local/system-local-tmp-ja-3-Data.db to /var/lib/cassandra/data/system/local/system-local-ja-3-Data.db INFO 10:45:10,777 Completed flushing /var/lib/cassandra/data/system/local/system-local-ja-3-Data.db (118 bytes) for commitlog position ReplayPosition(segmentId=1374230697429, position=81544) DEBUG 10:45:10,806 adding /var/lib/cassandra/data/system/local/system-local-ja-3 to list of files tracked for system.local DEBUG 10:45:10,812 Scheduling a background task check for system.local with SizeTieredCompactionStrategy DEBUG 10:45:10,821 Checking system.local DEBUG 10:45:10,827 Compaction buckets are [[SSTableReader(path='/var/lib/cassandra/data/system/local/system-local-ja-3-Data.db'), 
SSTableReader(path='/var/lib/cassandra/data/system/local/system-local-ja-1-Data.db'), SSTableReader(path='/var/lib/cassandra/data/system/local/system-local-ja-2-Data.db')]] # DEBUG 10:45:10,831 discard completed log segments for ReplayPosition(segmentId=1374230697429, position=81544), column family 7ad54392-bcdd-35a6-8417-4e047860b377 DEBUG 10:45:10,862 Not deleting active commitlog segment CommitLogSegment(/var/lib/cassandra/commitlog/CommitLog-3-1374230697429.log) DEBUG 10:45:10,868 No tasks available # A fatal error has been detected by the Java Runtime Environment: # # SIGILL (0x4) at pc=0xab80b254, pid=20302, tid=2800444528 # # JRE version: Java(TM) SE Runtime Environment (8.0-b97) (build 1.8.0-ea-b97) # Java VM: Java HotSpot(TM) Client VM (25.0-b39 mixed mode linux-arm ) # Problematic frame: # C [snappy-1.0.5-libsnappyjava.so+0x1254] _init+0x223 # # Failed to write core dump. Core dumps have been disabled. To enable core dumping, try ulimit -c unlimited before starting Java again # # An error report
[jira] [Commented] (CASSANDRA-5519) Reduce index summary memory use for cold sstables
[ https://issues.apache.org/jira/browse/CASSANDRA-5519?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanelfocusedCommentId=13802408#comment-13802408 ] Jonathan Ellis commented on CASSANDRA-5519: --- LGTM Reduce index summary memory use for cold sstables - Key: CASSANDRA-5519 URL: https://issues.apache.org/jira/browse/CASSANDRA-5519 Project: Cassandra Issue Type: Bug Components: Core Reporter: Jonathan Ellis Assignee: Tyler Hobbs Priority: Minor Fix For: 2.1 Attachments: downsample.py -- This message was sent by Atlassian JIRA (v6.1#6144)