[1/3] cassandra git commit: include missing files

2015-03-12 Thread benedict
Repository: cassandra
Updated Branches:
  refs/heads/cassandra-2.1 6bbfb5574 -> 97d65d6d3
  refs/heads/trunk b7563f823 -> 05a6f2667


include missing files


Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo
Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/97d65d6d
Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/97d65d6d
Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/97d65d6d

Branch: refs/heads/cassandra-2.1
Commit: 97d65d6d3689ef3fd47d760004377486ad00d631
Parents: 6bbfb55
Author: Benedict Elliott Smith bened...@apache.org
Authored: Tue Mar 10 17:13:46 2015 +0000
Committer: Benedict Elliott Smith bened...@apache.org
Committed: Tue Mar 10 17:13:46 2015 +0000

--
 .../cassandra/stress/util/TimingIntervals.java  | 139 +++
 1 file changed, 139 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/cassandra/blob/97d65d6d/tools/stress/src/org/apache/cassandra/stress/util/TimingIntervals.java
--
diff --git 
a/tools/stress/src/org/apache/cassandra/stress/util/TimingIntervals.java 
b/tools/stress/src/org/apache/cassandra/stress/util/TimingIntervals.java
new file mode 100644
index 000..ab89d07
--- /dev/null
+++ b/tools/stress/src/org/apache/cassandra/stress/util/TimingIntervals.java
@@ -0,0 +1,139 @@
+package org.apache.cassandra.stress.util;
+
+import java.util.Arrays;
+import java.util.Map;
+import java.util.TreeMap;
+
+public class TimingIntervals
+{
+    final Map<String, TimingInterval> intervals;
+    TimingIntervals(Iterable<String> opTypes)
+    {
+        long now = System.nanoTime();
+        intervals = new TreeMap<>();
+        for (String opType : opTypes)
+            intervals.put(opType, new TimingInterval(now));
+    }
+
+    TimingIntervals(Map<String, TimingInterval> intervals)
+    {
+        this.intervals = intervals;
+    }
+
+    public TimingIntervals merge(TimingIntervals with, int maxSamples, long start)
+    {
+        assert intervals.size() == with.intervals.size();
+        TreeMap<String, TimingInterval> ret = new TreeMap<>();
+
+        for (String opType : intervals.keySet())
+        {
+            assert with.intervals.containsKey(opType);
+            ret.put(opType, TimingInterval.merge(Arrays.asList(intervals.get(opType), with.intervals.get(opType)), maxSamples, start));
+        }
+
+        return new TimingIntervals(ret);
+    }
+
+    public TimingInterval get(String opType)
+    {
+        return intervals.get(opType);
+    }
+
+    public TimingInterval combine(int maxSamples)
+    {
+        long start = Long.MAX_VALUE;
+        for (TimingInterval ti : intervals.values())
+            start = Math.min(start, ti.startNanos());
+
+        return TimingInterval.merge(intervals.values(), maxSamples, start);
+    }
+
+    public String str(TimingInterval.TimingParameter value)
+    {
+        return str(value, Float.NaN);
+    }
+
+    public String str(TimingInterval.TimingParameter value, float rank)
+    {
+        StringBuilder sb = new StringBuilder("[");
+
+        for (Map.Entry<String, TimingInterval> entry : intervals.entrySet())
+        {
+            sb.append(entry.getKey());
+            sb.append(":");
+            sb.append(entry.getValue().getStringValue(value, rank));
+            sb.append(", ");
+        }
+
+        sb.setLength(sb.length() - 2);
+        sb.append("]");
+
+        return sb.toString();
+    }
+
+    public String opRates()
+    {
+        return str(TimingInterval.TimingParameter.OPRATE);
+    }
+    public String partitionRates()
+    {
+        return str(TimingInterval.TimingParameter.PARTITIONRATE);
+    }
+    public String rowRates()
+    {
+        return str(TimingInterval.TimingParameter.ROWRATE);
+    }
+    public String meanLatencies()
+    {
+        return str(TimingInterval.TimingParameter.MEANLATENCY);
+    }
+    public String maxLatencies()
+    {
+        return str(TimingInterval.TimingParameter.MAXLATENCY);
+    }
+    public String medianLatencies()
+    {
+        return str(TimingInterval.TimingParameter.MEDIANLATENCY);
+    }
+    public String rankLatencies(float rank)
+    {
+        return str(TimingInterval.TimingParameter.MEDIANLATENCY, rank);
+    }
+    public String errorCounts()
+    {
+        return str(TimingInterval.TimingParameter.ERRORCOUNT);
+    }
+    public String partitionCounts()
+    {
+        return str(TimingInterval.TimingParameter.PARTITIONCOUNT);
+    }
+
+    public long opRate()
+    {
+        long v = 0;
+        for (TimingInterval interval : intervals.values())
+            v += interval.opRate();
+        return v;
+    }
+
+    public long startNanos()
+    {
+        long start = Long.MAX_VALUE;
+        for (TimingInterval interval : intervals.values())
+            start = Math.min(start, interval.startNanos());

[3/3] cassandra git commit: Merge branch 'cassandra-2.1' into trunk

2015-03-12 Thread benedict
Merge branch 'cassandra-2.1' into trunk


Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo
Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/05a6f266
Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/05a6f266
Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/05a6f266

Branch: refs/heads/trunk
Commit: 05a6f2667d368fd551c6b0803619cfc5e762ac19
Parents: b7563f8 97d65d6
Author: Benedict Elliott Smith bened...@apache.org
Authored: Tue Mar 10 17:13:56 2015 +0000
Committer: Benedict Elliott Smith bened...@apache.org
Committed: Tue Mar 10 17:13:56 2015 +0000

--

--




cassandra git commit: Expose commit log archiver status via JMX

2015-03-12 Thread jmckenzie
Repository: cassandra
Updated Branches:
  refs/heads/cassandra-2.0 d6ea0ff1f -> 77c66bf9f


Expose commit log archiver status via JMX

Patch by clohfink; reviewed by jmckenzie for CASSANDRA-8734


Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo
Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/77c66bf9
Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/77c66bf9
Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/77c66bf9

Branch: refs/heads/cassandra-2.0
Commit: 77c66bf9f50228306d4bf76bb2721c731875f261
Parents: d6ea0ff
Author: Chris Lohfink clohfin...@gmail.com
Authored: Wed Mar 11 11:14:59 2015 -0500
Committer: Joshua McKenzie jmcken...@apache.org
Committed: Wed Mar 11 11:14:59 2015 -0500

--
 CHANGES.txt |  1 +
 .../cassandra/db/commitlog/CommitLog.java   | 30 +++
 .../db/commitlog/CommitLogArchiver.java |  6 ++--
 .../cassandra/db/commitlog/CommitLogMBean.java  | 31 
 4 files changed, 65 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/cassandra/blob/77c66bf9/CHANGES.txt
--
diff --git a/CHANGES.txt b/CHANGES.txt
index d240a10..382b3dd 100644
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@ -1,4 +1,5 @@
 2.0.14:
+ * Expose commit log archive status via JMX (CASSANDRA-8734)
  * Provide better exceptions for invalid replication strategy parameters
(CASSANDRA-8909)
  * Fix regression in mixed single and multi-column relation support for

http://git-wip-us.apache.org/repos/asf/cassandra/blob/77c66bf9/src/java/org/apache/cassandra/db/commitlog/CommitLog.java
--
diff --git a/src/java/org/apache/cassandra/db/commitlog/CommitLog.java 
b/src/java/org/apache/cassandra/db/commitlog/CommitLog.java
index 7a27653..a3ce804 100644
--- a/src/java/org/apache/cassandra/db/commitlog/CommitLog.java
+++ b/src/java/org/apache/cassandra/db/commitlog/CommitLog.java
@@ -299,6 +299,36 @@ public class CommitLog implements CommitLogMBean
         return metrics.totalCommitLogSize.value();
     }
 
+    @Override
+    public String getArchiveCommand()
+    {
+        return archiver.archiveCommand;
+    }
+
+    @Override
+    public String getRestoreCommand()
+    {
+        return archiver.restoreCommand;
+    }
+
+    @Override
+    public String getRestoreDirectories()
+    {
+        return archiver.restoreDirectories;
+    }
+
+    @Override
+    public long getRestorePointInTime()
+    {
+        return archiver.restorePointInTime;
+    }
+
+    @Override
+    public String getRestorePrecision()
+    {
+        return archiver.precision.toString();
+    }
+
     /**
      * Fetches a new segment file from the allocator and activates it.
      *

http://git-wip-us.apache.org/repos/asf/cassandra/blob/77c66bf9/src/java/org/apache/cassandra/db/commitlog/CommitLogArchiver.java
--
diff --git a/src/java/org/apache/cassandra/db/commitlog/CommitLogArchiver.java 
b/src/java/org/apache/cassandra/db/commitlog/CommitLogArchiver.java
index 8957643..fd03624 100644
--- a/src/java/org/apache/cassandra/db/commitlog/CommitLogArchiver.java
+++ b/src/java/org/apache/cassandra/db/commitlog/CommitLogArchiver.java
@@ -53,9 +53,9 @@ public class CommitLogArchiver
 
     public final Map<String, Future<?>> archivePending = new ConcurrentHashMap<String, Future<?>>();
     public final ExecutorService executor = new JMXEnabledThreadPoolExecutor("commitlog_archiver");
-    private final String archiveCommand;
-    private final String restoreCommand;
-    private final String restoreDirectories;
+    final String archiveCommand;
+    final String restoreCommand;
+    final String restoreDirectories;
     public final long restorePointInTime;
     public final TimeUnit precision;
 

http://git-wip-us.apache.org/repos/asf/cassandra/blob/77c66bf9/src/java/org/apache/cassandra/db/commitlog/CommitLogMBean.java
--
diff --git a/src/java/org/apache/cassandra/db/commitlog/CommitLogMBean.java 
b/src/java/org/apache/cassandra/db/commitlog/CommitLogMBean.java
index 6c0d8d7..9f3eb9b 100644
--- a/src/java/org/apache/cassandra/db/commitlog/CommitLogMBean.java
+++ b/src/java/org/apache/cassandra/db/commitlog/CommitLogMBean.java
@@ -45,6 +45,37 @@ public interface CommitLogMBean
     public long getTotalCommitlogSize();
 
     /**
+     * Command to execute to archive a commitlog segment. Blank to disable.
+     */
+    public String getArchiveCommand();
+
+    /**
+     * Command to execute to make an archived commitlog live again.
+     */
+    public String getRestoreCommand();
+
+    /**
+     * Directory to scan the recovery 
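
For reference, a minimal sketch of reading the newly exposed attributes from a
JMX client (assuming the default JMX port 7199 and the
org.apache.cassandra.db:type=Commitlog name under which the commit log MBean is
registered):

import javax.management.MBeanServerConnection;
import javax.management.ObjectName;
import javax.management.remote.JMXConnector;
import javax.management.remote.JMXConnectorFactory;
import javax.management.remote.JMXServiceURL;

public class ArchiverStatus
{
    public static void main(String[] args) throws Exception
    {
        JMXServiceURL url = new JMXServiceURL(
                "service:jmx:rmi:///jndi/rmi://localhost:7199/jmxrmi");
        try (JMXConnector connector = JMXConnectorFactory.connect(url))
        {
            MBeanServerConnection mbs = connector.getMBeanServerConnection();
            ObjectName name = new ObjectName("org.apache.cassandra.db:type=Commitlog");
            // attribute names follow the getters on CommitLogMBean
            System.out.println("archive command: " + mbs.getAttribute(name, "ArchiveCommand"));
            System.out.println("restore command: " + mbs.getAttribute(name, "RestoreCommand"));
            System.out.println("restore precision: " + mbs.getAttribute(name, "RestorePrecision"));
        }
    }
}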

[3/3] cassandra git commit: Merge branch 'cassandra-2.1' into trunk

2015-03-12 Thread benedict
Merge branch 'cassandra-2.1' into trunk


Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo
Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/e585c339
Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/e585c339
Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/e585c339

Branch: refs/heads/trunk
Commit: e585c339d0fbc07d78bd46aff845d0b721360ad1
Parents: 69542a9 81fe1a2
Author: Benedict Elliott Smith bened...@apache.org
Authored: Tue Mar 10 17:43:17 2015 +0000
Committer: Benedict Elliott Smith bened...@apache.org
Committed: Tue Mar 10 17:43:17 2015 +0000

--
 .../src/org/apache/cassandra/stress/util/TimingIntervals.java  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--




[jira] [Comment Edited] (CASSANDRA-8938) Full Row Scan does not count towards Reads

2015-03-12 Thread Phil Yang (JIRA)

[ 
https://issues.apache.org/jira/browse/CASSANDRA-8938?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=14358257#comment-14358257
 ] 

Phil Yang edited comment on CASSANDRA-8938 at 3/12/15 8:04 AM:
---

I'm not an expert. Do you mean "full row scan" is selecting all rows in a 
table? The full table scan seems to need to read all sstables, so compaction that 
reduces the number of sstables will not reduce the disk IO while scanning the 
whole table. I think that is the reason that sstables will not be marked as hot 
after the full table scan.

For your first question, it seems that there are two read counters: one is 
"readLatency", the other is "rangeLatency", and nodetool only returns the count 
of the first one, so your scan query does not affect it.


was (Author: yangzhe1991):
I'm not an expert. Do you mean "full row scan" is selecting all rows in a 
table? The full table scan seems to need to read all sstables, so compaction that 
reduces the number of sstables will not reduce the disk IO while scanning the 
whole table. I think that is the reason that sstables will not be marked as hot 
after the full row scan.

For your first question, it seems that there are two read counters: one is 
"readLatency", the other is "rangeLatency", and nodetool only returns the count 
of the first one, so your scan query does not affect it.

 Full Row Scan does not count towards Reads
 --

 Key: CASSANDRA-8938
 URL: https://issues.apache.org/jira/browse/CASSANDRA-8938
 Project: Cassandra
  Issue Type: Bug
  Components: API, Core, Tools
 Environment: Unix, Cassandra 2.0.3
Reporter: Amit Singh Chowdhery
Priority: Minor
  Labels: none

 When a CQL SELECT statement is executed with a WHERE clause, Read Count is 
 incremented in cfstats of the column family. But when a full row scan is 
 done using a SELECT statement without a WHERE clause, Read Count is not 
 incremented. 
 Similarly, when using Size Tiered Compaction, if we do a full row scan using 
 Hector RangeslicesQuery, Read Count is not incremented in cfstats; Cassandra 
 still considers all sstables as cold and does not trigger compaction for 
 them. If we fire MultigetSliceQuery, Read Count is incremented and sstables 
 become hot, triggering compaction of these sstables. 
 Expected Behavior:
 1. Read Count must be incremented by number of rows read during a full row 
 scan done using CQL SELECT statement or Hector RangeslicesQuery.
 2. Size Tiered compaction must consider all sstables as Hot after a full row 
 scan.



--
This message was sent by Atlassian JIRA
(v6.3.4#6332)


[jira] [Commented] (CASSANDRA-8692) Coalesce intra-cluster network messages

2015-03-12 Thread Ariel Weisberg (JIRA)

[ 
https://issues.apache.org/jira/browse/CASSANDRA-8692?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=14356905#comment-14356905
 ] 

Ariel Weisberg commented on CASSANDRA-8692:
---

I can't make them consistent because there are cases where it's non-empty?

 If I don't use the extra method I have to cut and paste the precondition into 
every implementation. I want to leave it as is.

 Coalesce intra-cluster network messages
 ---

 Key: CASSANDRA-8692
 URL: https://issues.apache.org/jira/browse/CASSANDRA-8692
 Project: Cassandra
  Issue Type: Improvement
  Components: Core
Reporter: Ariel Weisberg
Assignee: Ariel Weisberg
 Fix For: 2.1.4

 Attachments: batching-benchmark.png


 While researching CASSANDRA-8457 we found that it is effective and can be 
 done without introducing additional latency at low concurrency/throughput.
 The patch from that was used and found to be useful in a real life scenario 
 so I propose we implement this in 2.1 in addition to 3.0.
 The change set is a single file and is small enough to be reviewable.



--
This message was sent by Atlassian JIRA
(v6.3.4#6332)


[jira] [Commented] (CASSANDRA-8938) Full Row Scan does not count towards Reads

2015-03-12 Thread Anuj (JIRA)

[ 
https://issues.apache.org/jira/browse/CASSANDRA-8938?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=14358306#comment-14358306
 ] 

Anuj commented on CASSANDRA-8938:
-

Yes. We mean full row scan (a SELECT query without a WHERE clause). Even if a 
full row scan reads all sstables, it should be counted as Reads, and all 
sstables must be marked hot and available for the next compaction. 

There is only one Read Count when you do cfstats. We are not talking about 
latency.

We think that after a row scan, read count must be incremented and STCS should 
pick these sstables for compaction, as data has been read from them.  

 Full Row Scan does not count towards Reads
 --

 Key: CASSANDRA-8938
 URL: https://issues.apache.org/jira/browse/CASSANDRA-8938
 Project: Cassandra
  Issue Type: Bug
  Components: API, Core, Tools
 Environment: Unix, Cassandra 2.0.3
Reporter: Amit Singh Chowdhery
Priority: Minor
  Labels: none

 When a CQL SELECT statement is executed with a WHERE clause, Read Count is 
 incremented in cfstats of the column family. But when a full row scan is 
 done using a SELECT statement without a WHERE clause, Read Count is not 
 incremented. 
 Similarly, when using Size Tiered Compaction, if we do a full row scan using 
 Hector RangeslicesQuery, Read Count is not incremented in cfstats; Cassandra 
 still considers all sstables as cold and does not trigger compaction for 
 them. If we fire MultigetSliceQuery, Read Count is incremented and sstables 
 become hot, triggering compaction of these sstables. 
 Expected Behavior:
 1. Read Count must be incremented by number of rows read during a full row 
 scan done using CQL SELECT statement or Hector RangeslicesQuery.
 2. Size Tiered compaction must consider all sstables as Hot after a full row 
 scan.



--
This message was sent by Atlassian JIRA
(v6.3.4#6332)


[jira] [Updated] (CASSANDRA-8085) Make PasswordAuthenticator number of hashing rounds configurable

2015-03-12 Thread Aleksey Yeschenko (JIRA)

 [ 
https://issues.apache.org/jira/browse/CASSANDRA-8085?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Aleksey Yeschenko updated CASSANDRA-8085:
-
Assignee: Sam Tunnicliffe  (was: Aleksey Yeschenko)

 Make PasswordAuthenticator number of hashing rounds configurable
 

 Key: CASSANDRA-8085
 URL: https://issues.apache.org/jira/browse/CASSANDRA-8085
 Project: Cassandra
  Issue Type: Improvement
  Components: Core
Reporter: Tyler Hobbs
Assignee: Sam Tunnicliffe
 Fix For: 2.1.4


 Running 2^10 rounds of bcrypt can take a while.  In environments (like PHP) 
 where connections are not typically long-lived, authenticating can add 
 substantial overhead.  On IRC, one user saw the time to connect, 
 authenticate, and execute a query jump from 5ms to 150ms with authentication 
 enabled ([debug logs|http://pastebin.com/bSUufbr0]).
 CASSANDRA-7715 is a more complete fix for this, but in the meantime (and even 
 after 7715), this is a good option.
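
 A minimal sketch of what a configurable work factor might look like, using the 
 jBCrypt API Cassandra already bundles (the property name is illustrative):
{code}
import org.mindrot.jbcrypt.BCrypt;

public final class ConfigurableRounds
{
    // 2^10 is the currently hardcoded default; lowering it trades hash
    // strength for faster authentication on short-lived connections.
    private static final int GENSALT_LOG2_ROUNDS =
        Integer.getInteger("cassandra.auth_bcrypt_gensalt_log2_rounds", 10);

    public static String hash(String password)
    {
        return BCrypt.hashpw(password, BCrypt.gensalt(GENSALT_LOG2_ROUNDS));
    }

    public static boolean check(String password, String hash)
    {
        return BCrypt.checkpw(password, hash);
    }
}
{code}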



--
This message was sent by Atlassian JIRA
(v6.3.4#6332)


[jira] [Commented] (CASSANDRA-6809) Compressed Commit Log

2015-03-12 Thread Ariel Weisberg (JIRA)

[ 
https://issues.apache.org/jira/browse/CASSANDRA-6809?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=14355743#comment-14355743
 ] 

Ariel Weisberg commented on CASSANDRA-6809:
---

I really like what you did with CommitLogStressTest.

At 
https://github.com/apache/cassandra/compare/trunk...blambov:6809-compressed-logs#diff-d07279710c482983e537aed26df80400R340 
if archiving fails, it appears to delete the segment now. Is that the right 
thing to do?

CSLM's understanding of segment size is skewed because compressed segments are 
smaller than the expected segment size in reality. With real compression ratios 
it's going to be off by 30-50%. It would be nice if its tracking could be 
corrected once the actual size is known.

For the buffer pooling, I would be tempted not to wait for the collector to get 
to the DBB. If the DBB is promoted due to compaction or some other allocation 
hog, it may not be reclaimed for some time. In CompressedSegment.close, maybe 
null the field and then invoke the cleaner on the buffer. There is a utility 
method for doing that so you don't have to access the interface directly (it 
generates a compiler warning).
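
For reference, a minimal sketch of what invoking the cleaner eagerly might look 
like (assuming a pre-Java-9 JDK; the helper name is illustrative, and in 
practice the existing utility method would be used):
{code}
import java.nio.ByteBuffer;

public final class BufferCleaner
{
    // Frees a DirectByteBuffer's native memory immediately instead of waiting
    // for the garbage collector to reach it. The cast to sun.nio.ch.DirectBuffer
    // is what generates the compiler warning mentioned above, which is why it
    // is confined to a single utility method.
    public static void clean(ByteBuffer buffer)
    {
        if (buffer == null || !buffer.isDirect())
            return;
        sun.misc.Cleaner cleaner = ((sun.nio.ch.DirectBuffer) buffer).cleaner();
        if (cleaner != null)
            cleaner.clean();
    }
}
{code}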

Also make MAX_BUFFERPOOL_SIZE configurable via a property. I have been 
prefixing internal C* properties with "cassandra.". I suspect that at several 
hundred megabytes a second we will have more than three 32-megabyte buffers in 
flight. I have a personal fear of shipping constants that aren't quite right, 
and putting them all in properties can save waiting for code changes.

I tested on Linux. If I drop the page cache on the new code, it doesn't generate 
reads. I tested the old code and it generated a few hundred megabytes of reads.

 Compressed Commit Log
 -

 Key: CASSANDRA-6809
 URL: https://issues.apache.org/jira/browse/CASSANDRA-6809
 Project: Cassandra
  Issue Type: Improvement
Reporter: Benedict
Assignee: Branimir Lambov
Priority: Minor
  Labels: docs-impacting, performance
 Fix For: 3.0

 Attachments: ComitLogStress.java, logtest.txt


 It seems an unnecessary oversight that we don't compress the commit log. 
 Doing so should improve throughput, but some care will need to be taken to 
 ensure we use as much of a segment as possible. I propose decoupling the 
 writing of the records from the segments. Basically write into a (queue of) 
 DirectByteBuffer, and have the sync thread compress, say, ~64K chunks every X 
 MB written to the CL (where X is ordinarily CLS size), and then pack as many 
 of the compressed chunks into a CLS as possible.



--
This message was sent by Atlassian JIRA
(v6.3.4#6332)


[jira] [Commented] (CASSANDRA-8553) Add a key-value payload for third party usage

2015-03-12 Thread Sylvain Lebresne (JIRA)

[ 
https://issues.apache.org/jira/browse/CASSANDRA-8553?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=14358278#comment-14358278
 ] 

Sylvain Lebresne commented on CASSANDRA-8553:
-

We have unused flags in our frame header, so using one to indicate that a 
message includes a simple key-value map at the beginning or end of the body 
should be simple to do.

 Add a key-value payload for third party usage
 -

 Key: CASSANDRA-8553
 URL: https://issues.apache.org/jira/browse/CASSANDRA-8553
 Project: Cassandra
  Issue Type: Sub-task
Reporter: Sergio Bossa
  Labels: client-impacting, protocolv4
 Fix For: 3.0


 A useful improvement would be to include a generic key-value payload, so 
 that developers implementing a custom {{QueryHandler}} could leverage that to 
 move custom data back and forth.



--
This message was sent by Atlassian JIRA
(v6.3.4#6332)


[jira] [Assigned] (CASSANDRA-8413) Bloom filter false positive ratio is not honoured

2015-03-12 Thread Robert Stupp (JIRA)

 [ 
https://issues.apache.org/jira/browse/CASSANDRA-8413?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Robert Stupp reassigned CASSANDRA-8413:
---

Assignee: Robert Stupp  (was: Aleksey Yeschenko)

 Bloom filter false positive ratio is not honoured
 -

 Key: CASSANDRA-8413
 URL: https://issues.apache.org/jira/browse/CASSANDRA-8413
 Project: Cassandra
  Issue Type: Bug
  Components: Core
Reporter: Benedict
Assignee: Robert Stupp
 Fix For: 2.1.4

 Attachments: 8413.hack.txt


 Whilst thinking about CASSANDRA-7438 and hash bits, I realised we have a 
 problem with sabotaging our bloom filters when using the murmur3 partitioner. 
 I have performed a very quick test to confirm this risk is real.
 Since a typical cluster uses the same murmur3 hash for partitioning as we do 
 for bloom filter lookups, and we own a contiguous range, we can guarantee 
 that the top X bits collide for all keys on the node. This translates into 
 poor bloom filter distribution. I quickly hacked LongBloomFilterTest to 
 simulate the problem, and the result in these tests is _up to_ a doubling of 
 the actual false positive ratio. The actual change will depend on the key 
 distribution, the number of keys, the false positive ratio, the number of 
 nodes, the token distribution, etc. But it seems to be a real problem for 
 non-vnode clusters of at least ~128 nodes in size.



--
This message was sent by Atlassian JIRA
(v6.3.4#6332)


[jira] [Resolved] (CASSANDRA-8865) DROP INDEX name case sensitivity causing errors in cass upgrade 2.0.10 to 2.1.3

2015-03-12 Thread Tyler Hobbs (JIRA)

 [ 
https://issues.apache.org/jira/browse/CASSANDRA-8865?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Tyler Hobbs resolved CASSANDRA-8865.

Resolution: Cannot Reproduce

Since I haven't gotten a response, I'm going to go ahead and close this as 
Cannot Reproduce for now.  If you are able to come up with some steps to 
reproduce the issue, please leave a comment and I'll re-open the ticket.

 DROP INDEX name case sensitivity causing errors in cass upgrade 2.0.10 to 
 2.1.3
 ---

 Key: CASSANDRA-8865
 URL: https://issues.apache.org/jira/browse/CASSANDRA-8865
 Project: Cassandra
  Issue Type: Bug
  Components: Core
 Environment: Amazon, single node, ubuntu 14.04, jdk 7
Reporter: Constance Eustace
Assignee: Tyler Hobbs
 Fix For: 2.1.4


 We are upgrading our dev cluster from 2.0.10 to 2.1.3.
 The indexes are behaving very strangely.
 create index definition_bundle__BundleDefSkuIDXTest on 
 definition_bundle.entity_bundledef(e_entlinks) ;
 select column_name, index_name, index_options, index_type, component_index 
 from system.schema_columns where keyspace_name = 'definition_bundle' 
 and columnfamily_name = 'entity_bundledef';
  column_name | index_name                             | index_options | index_type | component_index
 -------------+----------------------------------------+---------------+------------+-----------------
   bundle_sku | definition_bundle__BundleDefSkuIDX     | {}            | COMPOSITES |               1
      e_entid | null                                   | null          |       null |            null
   e_entlinks | definition_bundle__bundledefskuidxtest | {}            | COMPOSITES |               1
 NOTICE THE AUTO-DOWNCASE of our newly created index. The index that already 
 existed is NOT AUTO-DOWNCASED. I don't know if this is recent or not.
 We cannot drop the mixed-case index. Nodetool index reconstruction did not 
 work. Indexes are doing very weird things.
 Hm. UPDATE:
 This did successfully delete the index:
 drop index definition_bundle__BundleDefSkuIDX;
 Anyway, it looks like there are some upcase/downcase assumptions not being 
 properly handled somewhere, either in upgrades or similar stuff.
 We will probably drop our indexes and recreate them.



--
This message was sent by Atlassian JIRA
(v6.3.4#6332)


[jira] [Commented] (CASSANDRA-8941) Test Coverage for CASSANDRA-8786

2015-03-12 Thread Tyler Hobbs (JIRA)

[ 
https://issues.apache.org/jira/browse/CASSANDRA-8941?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=14355449#comment-14355449
 ] 

Tyler Hobbs commented on CASSANDRA-8941:


[~iamaleksey] any ideas about how to reproduce this consistently?

 Test Coverage for CASSANDRA-8786
 

 Key: CASSANDRA-8941
 URL: https://issues.apache.org/jira/browse/CASSANDRA-8941
 Project: Cassandra
  Issue Type: Test
  Components: Tests
Reporter: Tyler Hobbs
Assignee: Philip Thompson
Priority: Minor
 Fix For: 2.1.4


 We don't currently have a test to reproduce the issue from CASSANDRA-8786.  
 It would be good to track down exactly what circumstances cause this and add 
 some test coverage.



--
This message was sent by Atlassian JIRA
(v6.3.4#6332)


[jira] [Commented] (CASSANDRA-8937) add liveSSTableCount metrics at keyspace level

2015-03-12 Thread Chris Lohfink (JIRA)

[ 
https://issues.apache.org/jira/browse/CASSANDRA-8937?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=1438#comment-1438
 ] 

Chris Lohfink commented on CASSANDRA-8937:
--

I think this would be a good keyspace one. +1 from me.

 add liveSSTableCount metrics at keyspace level
 --

 Key: CASSANDRA-8937
 URL: https://issues.apache.org/jira/browse/CASSANDRA-8937
 Project: Cassandra
  Issue Type: Improvement
  Components: Core
Reporter: yangwei
Assignee: yangwei
Priority: Minor
 Fix For: 2.1.4

 Attachments: 0001-add-liveSSTableCount-metrics-at-keyspace-level.patch


 We currently don't have liveSSTableCount metrics aggregated at the keyspace 
 level. If many sstables exist and we don't realize it early enough, cassandra 
 will oom while doing scanning operations. It would be nice and easy to 
 aggregate it.



--
This message was sent by Atlassian JIRA
(v6.3.4#6332)


[jira] [Commented] (CASSANDRA-8527) Extend tombstone_warning_threshold to range tombstones

2015-03-12 Thread Aleksey Yeschenko (JIRA)

[ 
https://issues.apache.org/jira/browse/CASSANDRA-8527?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=14358266#comment-14358266
 ] 

Aleksey Yeschenko commented on CASSANDRA-8527:
--

I'm not sure that changing the current behaviour in a minor release is 
something we can do safely. For one, some people's queries will suddenly and 
unexpectedly stop working; for two, it's a small but non-trivial change for a 
minor, code-wise.

As for 3.0, should get CASSANDRA-8099 in first, assuming that it doesn't handle 
this already.

Either way, relabeling as 3.0 for now.

 Extend tombstone_warning_threshold to range tombstones
 --

 Key: CASSANDRA-8527
 URL: https://issues.apache.org/jira/browse/CASSANDRA-8527
 Project: Cassandra
  Issue Type: Improvement
Reporter: Sylvain Lebresne
Assignee: Aleksey Yeschenko
 Fix For: 3.0


 As discussed in CASSANDRA-8477, we should make sure the tombstone thresholds 
 also apply to range tombstones, since they pose the same problems as cell 
 tombstones.



--
This message was sent by Atlassian JIRA
(v6.3.4#6332)


[jira] [Assigned] (CASSANDRA-8553) Add a key-value payload for third party usage

2015-03-12 Thread Robert Stupp (JIRA)

 [ 
https://issues.apache.org/jira/browse/CASSANDRA-8553?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Robert Stupp reassigned CASSANDRA-8553:
---

Assignee: Robert Stupp

 Add a key-value payload for third party usage
 -

 Key: CASSANDRA-8553
 URL: https://issues.apache.org/jira/browse/CASSANDRA-8553
 Project: Cassandra
  Issue Type: Sub-task
Reporter: Sergio Bossa
Assignee: Robert Stupp
  Labels: client-impacting, protocolv4
 Fix For: 3.0


 A useful improvement would be to include a generic key-value payload, so 
 that developers implementing a custom {{QueryHandler}} could leverage that to 
 move custom data back and forth.



--
This message was sent by Atlassian JIRA
(v6.3.4#6332)


[jira] [Assigned] (CASSANDRA-7881) SCHEMA_CHANGE Events and Responses should carry the Schema Version

2015-03-12 Thread Robert Stupp (JIRA)

 [ 
https://issues.apache.org/jira/browse/CASSANDRA-7881?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Robert Stupp reassigned CASSANDRA-7881:
---

Assignee: Robert Stupp

 SCHEMA_CHANGE Events and Responses should carry the Schema Version
 --

 Key: CASSANDRA-7881
 URL: https://issues.apache.org/jira/browse/CASSANDRA-7881
 Project: Cassandra
  Issue Type: Sub-task
Reporter: Michaël Figuière
Assignee: Robert Stupp
Priority: Minor
  Labels: protocolv4

 For similar logging and debugging purpose as exposed in CASSANDRA-7880, it 
 would be helpful to send to the client the previous and new schema version 
 UUID that were in use before and after a schema change operation, in the 
 {{SCHEMA_CHANGE}} events and responses in the protocol v4.
 This could then be exposed in the client APIs in order to bring much more 
 precise awareness of the actual status of the schema on each node.



--
This message was sent by Atlassian JIRA
(v6.3.4#6332)


[jira] [Updated] (CASSANDRA-8527) Extend tombstone_warning_threshold to range tombstones

2015-03-12 Thread Aleksey Yeschenko (JIRA)

 [ 
https://issues.apache.org/jira/browse/CASSANDRA-8527?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Aleksey Yeschenko updated CASSANDRA-8527:
-
Fix Version/s: (was: 2.1.4)
   3.0

 Extend tombstone_warning_threshold to range tombstones
 --

 Key: CASSANDRA-8527
 URL: https://issues.apache.org/jira/browse/CASSANDRA-8527
 Project: Cassandra
  Issue Type: Improvement
Reporter: Sylvain Lebresne
Assignee: Aleksey Yeschenko
 Fix For: 3.0


 As discussed in CASSANDRA-8477, we should make sure the tombstone thresholds 
 also apply to range tombstones, since they pose the same problems as cell 
 tombstones.



--
This message was sent by Atlassian JIRA
(v6.3.4#6332)


[jira] [Commented] (CASSANDRA-8959) More efficient frozen UDT and tuple serialization format

2015-03-12 Thread Sylvain Lebresne (JIRA)

[ 
https://issues.apache.org/jira/browse/CASSANDRA-8959?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=14358267#comment-14358267
 ] 

Sylvain Lebresne commented on CASSANDRA-8959:
-

For the record, this should also be extended to collections.

I'll note that there are 2 subparts to this: the internal encoding, and the one 
we send to clients. It's technically possible to not have the same encoding for 
both and translate when receiving/sending to clients, but what is inefficient 
internally is also inefficient on the native protocol, so I'd suggest we switch 
to the same more efficient encoding for both (but for existing versions of the 
native protocol, this does mean we'll have to translate to the old format, 
which is ok).

 More efficient frozen UDT and tuple serialization format
 

 Key: CASSANDRA-8959
 URL: https://issues.apache.org/jira/browse/CASSANDRA-8959
 Project: Cassandra
  Issue Type: Improvement
Reporter: Aleksey Yeschenko
  Labels: performance
 Fix For: 3.1


 The current serialization format for UDTs has a fixed overhead of 4 bytes per 
 defined field (encoding the size of the field).
 It is inefficient for sparse UDTs - ones with many defined fields, but few of 
 them present. We could keep a bitset to indicate the missing fields, if any.
 It's sub-optimal for encoding UDTs with all the values present as well. We 
 could use varint encoding for the field sizes of blob/text fields and encode 
 'fixed' sized types directly, without the 4-bytes size prologue.
 That or something more brilliant. Any improvement right now is lhf.
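
 To make the bitset idea concrete, a toy sketch (purely illustrative, not a 
 proposed wire format) of encoding a sparse set of field values with a presence 
 bitmap and varint sizes:
{code}
import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public final class SparseEncoding
{
    // values[i] == null means field i is absent; assumes at most 64 fields.
    public static byte[] encode(byte[][] values) throws IOException
    {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(bytes);
        long present = 0;
        for (int i = 0; i < values.length; i++)
            if (values[i] != null)
                present |= 1L << i;
        out.writeLong(present);               // one presence bit per defined field
        for (byte[] v : values)
        {
            if (v == null)
                continue;                     // absent fields cost nothing further
            writeUnsignedVInt(out, v.length); // varint size instead of 4 fixed bytes
            out.write(v);
        }
        return bytes.toByteArray();
    }

    private static void writeUnsignedVInt(DataOutputStream out, long value) throws IOException
    {
        while ((value & ~0x7FL) != 0)
        {
            out.writeByte((int) ((value & 0x7F) | 0x80));
            value >>>= 7;
        }
        out.writeByte((int) value);
    }
}
{code}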



--
This message was sent by Atlassian JIRA
(v6.3.4#6332)


[jira] [Commented] (CASSANDRA-8937) add liveSSTableCount metrics at keyspace level

2015-03-12 Thread Brandon Williams (JIRA)

[ 
https://issues.apache.org/jira/browse/CASSANDRA-8937?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=14355639#comment-14355639
 ] 

Brandon Williams commented on CASSANDRA-8937:
-

Just about everything, see CASSANDRA-6539 for the history.

 add liveSSTableCount metrics at keyspace level
 --

 Key: CASSANDRA-8937
 URL: https://issues.apache.org/jira/browse/CASSANDRA-8937
 Project: Cassandra
  Issue Type: Improvement
  Components: Core
Reporter: yangwei
Assignee: yangwei
Priority: Minor
 Fix For: 2.1.4

 Attachments: 0001-add-liveSSTableCount-metrics-at-keyspace-level.patch


 We currently don't have liveSSTableCount metrics aggregated at the keyspace 
 level. If many sstables exist and we don't realize it early enough, cassandra 
 will oom while doing scanning operations. It would be nice and easy to 
 aggregate it.



--
This message was sent by Atlassian JIRA
(v6.3.4#6332)


[jira] [Created] (CASSANDRA-8944) Add double_counter Type

2015-03-12 Thread Benjamin Coverston (JIRA)
Benjamin Coverston created CASSANDRA-8944:
-

 Summary: Add double_counter Type
 Key: CASSANDRA-8944
 URL: https://issues.apache.org/jira/browse/CASSANDRA-8944
 Project: Cassandra
  Issue Type: Bug
  Components: Core
Reporter: Benjamin Coverston
 Fix For: 3.1


A double_counter type similar to the integer counter type that currently exists.



--
This message was sent by Atlassian JIRA
(v6.3.4#6332)


[jira] [Commented] (CASSANDRA-8946) Make SSTableScanner always respect its bound

2015-03-12 Thread Sylvain Lebresne (JIRA)

[ 
https://issues.apache.org/jira/browse/CASSANDRA-8946?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=14356886#comment-14356886
 ] 

Sylvain Lebresne commented on CASSANDRA-8946:
-

Marking as blocking CASSANDRA-8099 because the current patch there doesn't do 
the compensation that {{ColumnFamilyStore.getSequentialIterator}} does and the 
right fix is this ticket.

 Make SSTableScanner always respect its bound
 

 Key: CASSANDRA-8946
 URL: https://issues.apache.org/jira/browse/CASSANDRA-8946
 Project: Cassandra
  Issue Type: Improvement
Reporter: Sylvain Lebresne

 When {{SSTableScanner}} takes a {{DataRange}}, it doesn't fully respect the 
 bounds provided, as it always generates a {{Bounds}} object, thus potentially 
 ending up including a key it should have excluded. This is currently compensated 
 for in {{ColumnFamilyStore.getSequentialIterator}}, but that is still an 
 unexpected behavior and as such error prone. We should fix that and remove 
 the compensation in {{ColumnFamilyStore.getSequentialIterator}}.



--
This message was sent by Atlassian JIRA
(v6.3.4#6332)


[jira] [Assigned] (CASSANDRA-7807) Push notification when tracing completes for an operation

2015-03-12 Thread Robert Stupp (JIRA)

 [ 
https://issues.apache.org/jira/browse/CASSANDRA-7807?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Robert Stupp reassigned CASSANDRA-7807:
---

Assignee: Robert Stupp

 Push notification when tracing completes for an operation
 -

 Key: CASSANDRA-7807
 URL: https://issues.apache.org/jira/browse/CASSANDRA-7807
 Project: Cassandra
  Issue Type: Sub-task
  Components: Core
Reporter: Tyler Hobbs
Assignee: Robert Stupp
Priority: Minor
  Labels: protocolv4
 Fix For: 3.0


 Tracing is an asynchronous operation, and drivers currently poll to determine 
 when the trace is complete (in a loop with sleeps).  Instead, the server 
 could push a notification to the driver when the trace completes.
 I'm guessing that most of the work for this will be around pushing 
 notifications to a single connection instead of all connections that have 
 registered listeners for a particular event type.



--
This message was sent by Atlassian JIRA
(v6.3.4#6332)


[jira] [Commented] (CASSANDRA-8938) Full Row Scan does not count towards Reads

2015-03-12 Thread Phil Yang (JIRA)

[ 
https://issues.apache.org/jira/browse/CASSANDRA-8938?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=14358257#comment-14358257
 ] 

Phil Yang commented on CASSANDRA-8938:
--

I'm not an expert. Do you mean "full row scan" is selecting all rows in a 
table? The full table scan seems to need to read all sstables, so compaction that 
reduces the number of sstables will not reduce the disk IO while scanning the 
whole table. I think that is the reason that sstables will not be marked as hot 
after the full row scan.

For your first question, it seems that there are two read counters: one is 
"readLatency", the other is "rangeLatency", and nodetool only returns the count 
of the first one, so your scan query does not affect it.

 Full Row Scan does not count towards Reads
 --

 Key: CASSANDRA-8938
 URL: https://issues.apache.org/jira/browse/CASSANDRA-8938
 Project: Cassandra
  Issue Type: Bug
  Components: API, Core, Tools
 Environment: Unix, Cassandra 2.0.3
Reporter: Amit Singh Chowdhery
Priority: Minor
  Labels: none

 When a CQL SELECT statement is executed with a WHERE clause, Read Count is 
 incremented in cfstats of the column family. But when a full row scan is 
 done using a SELECT statement without a WHERE clause, Read Count is not 
 incremented. 
 Similarly, when using Size Tiered Compaction, if we do a full row scan using 
 Hector RangeslicesQuery, Read Count is not incremented in cfstats; Cassandra 
 still considers all sstables as cold and does not trigger compaction for 
 them. If we fire MultigetSliceQuery, Read Count is incremented and sstables 
 become hot, triggering compaction of these sstables. 
 Expected Behavior:
 1. Read Count must be incremented by number of rows read during a full row 
 scan done using CQL SELECT statement or Hector RangeslicesQuery.
 2. Size Tiered compaction must consider all sstables as Hot after a full row 
 scan.



--
This message was sent by Atlassian JIRA
(v6.3.4#6332)


[jira] [Updated] (CASSANDRA-8948) cassandra-stress does not honour consistency level (cl) parameter when used in combination with user command

2015-03-12 Thread T Jake Luciani (JIRA)

 [ 
https://issues.apache.org/jira/browse/CASSANDRA-8948?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

T Jake Luciani updated CASSANDRA-8948:
--
Reviewer: Benedict  (was: Andreas Fink)

 cassandra-stress does not honour consistency level (cl) parameter when used 
 in combination with user command
 

 Key: CASSANDRA-8948
 URL: https://issues.apache.org/jira/browse/CASSANDRA-8948
 Project: Cassandra
  Issue Type: Bug
  Components: Tools
Reporter: Andreas Flinck
Assignee: T Jake Luciani
 Fix For: 2.1.4

 Attachments: 8948.txt


 The stress test tool does not honour the cl parameter when used in combination 
 with the user command. The consistency level will be the default ONE no matter 
 what is set by cl=.
 Works fine with the write command.
 How to reproduce:
 1. Create a suitable yaml-file to use in test
 2. Run e.g. {code}./cassandra-stress user profile=./file.yaml cl=ALL 
 no-warmup duration=10s  ops\(insert=1\) -rate threads=4 -port jmx=7100{code}
 3. Observe that cl=ONE in trace logs



--
This message was sent by Atlassian JIRA
(v6.3.4#6332)


[2/2] cassandra git commit: Merge branch 'cassandra-2.1' into trunk

2015-03-12 Thread jake
Merge branch 'cassandra-2.1' into trunk


Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo
Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/f843b42e
Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/f843b42e
Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/f843b42e

Branch: refs/heads/trunk
Commit: f843b42e87efc3d7cd167aa99d938109a51e7088
Parents: bc7941c 4646014
Author: T Jake Luciani j...@apache.org
Authored: Wed Mar 11 12:59:55 2015 -0400
Committer: T Jake Luciani j...@apache.org
Committed: Wed Mar 11 12:59:55 2015 -0400

--
 CHANGES.txt   | 1 +
 .../cassandra/stress/operations/userdefined/SchemaStatement.java  | 3 +++
 2 files changed, 4 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/cassandra/blob/f843b42e/CHANGES.txt
--
diff --cc CHANGES.txt
index b999577,c8a4a84..c67acd1
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@@ -1,74 -1,5 +1,75 @@@
 +3.0
 + * Convert SequentialWriter to nio (CASSANDRA-8709)
 + * Add role based access control (CASSANDRA-7653, 8650, 7216, 8760, 8849, 
8761, 8850)
 + * Record client ip address in tracing sessions (CASSANDRA-8162)
 + * Indicate partition key columns in response metadata for prepared
 +   statements (CASSANDRA-7660)
 + * Merge UUIDType and TimeUUIDType parse logic (CASSANDRA-8759)
 + * Avoid memory allocation when searching index summary (CASSANDRA-8793)
 + * Optimise (Time)?UUIDType Comparisons (CASSANDRA-8730)
 + * Make CRC32Ex into a separate maven dependency (CASSANDRA-8836)
 + * Use preloaded jemalloc w/ Unsafe (CASSANDRA-8714)
 + * Avoid accessing partitioner through StorageProxy (CASSANDRA-8244, 8268)
 + * Upgrade Metrics library and remove deprecated metrics (CASSANDRA-5657)
 + * Serializing Row cache alternative, fully off heap (CASSANDRA-7438)
 + * Duplicate rows returned when in clause has repeated values (CASSANDRA-6707)
 + * Make CassandraException unchecked, extend RuntimeException (CASSANDRA-8560)
 + * Support direct buffer decompression for reads (CASSANDRA-8464)
 + * DirectByteBuffer compatible LZ4 methods (CASSANDRA-7039)
 + * Group sstables for anticompaction correctly (CASSANDRA-8578)
 + * Add ReadFailureException to native protocol, respond
 +   immediately when replicas encounter errors while handling
 +   a read request (CASSANDRA-7886)
 + * Switch CommitLogSegment from RandomAccessFile to nio (CASSANDRA-8308)
 + * Allow mixing token and partition key restrictions (CASSANDRA-7016)
 + * Support index key/value entries on map collections (CASSANDRA-8473)
 + * Modernize schema tables (CASSANDRA-8261)
 + * Support for user-defined aggregation functions (CASSANDRA-8053)
 + * Fix NPE in SelectStatement with empty IN values (CASSANDRA-8419)
 + * Refactor SelectStatement, return IN results in natural order instead
 +   of IN value list order and ignore duplicate values in partition key IN 
restrictions (CASSANDRA-7981)
 + * Support UDTs, tuples, and collections in user-defined
 +   functions (CASSANDRA-7563)
 + * Fix aggregate fn results on empty selection, result column name,
 +   and cqlsh parsing (CASSANDRA-8229)
 + * Mark sstables as repaired after full repair (CASSANDRA-7586)
 + * Extend Descriptor to include a format value and refactor reader/writer
 +   APIs (CASSANDRA-7443)
 + * Integrate JMH for microbenchmarks (CASSANDRA-8151)
 + * Keep sstable levels when bootstrapping (CASSANDRA-7460)
 + * Add Sigar library and perform basic OS settings check on startup 
(CASSANDRA-7838)
 + * Support for aggregation functions (CASSANDRA-4914)
 + * Remove cassandra-cli (CASSANDRA-7920)
 + * Accept dollar quoted strings in CQL (CASSANDRA-7769)
 + * Make assassinate a first class command (CASSANDRA-7935)
 + * Support IN clause on any partition key column (CASSANDRA-7855)
 + * Support IN clause on any clustering column (CASSANDRA-4762)
 + * Improve compaction logging (CASSANDRA-7818)
 + * Remove YamlFileNetworkTopologySnitch (CASSANDRA-7917)
 + * Do anticompaction in groups (CASSANDRA-6851)
 + * Support user-defined functions (CASSANDRA-7395, 7526, 7562, 7740, 7781, 
7929,
 +   7924, 7812, 8063, 7813, 7708)
 + * Permit configurable timestamps with cassandra-stress (CASSANDRA-7416)
 + * Move sstable RandomAccessReader to nio2, which allows using the
 +   FILE_SHARE_DELETE flag on Windows (CASSANDRA-4050)
 + * Remove CQL2 (CASSANDRA-5918)
 + * Add Thrift get_multi_slice call (CASSANDRA-6757)
 + * Optimize fetching multiple cells by name (CASSANDRA-6933)
 + * Allow compilation in java 8 (CASSANDRA-7028)
 + * Make incremental repair default (CASSANDRA-7250)
 + * Enable code coverage thru JaCoCo (CASSANDRA-7226)
 + * Switch external naming of 'column families' to 'tables' (CASSANDRA-4369) 
 + * Shorten SSTable path (CASSANDRA-6962)
 + * Use unsafe mutations 

[jira] [Commented] (CASSANDRA-8553) Add a key-value payload for third party usage

2015-03-12 Thread Sylvain Lebresne (JIRA)

[ 
https://issues.apache.org/jira/browse/CASSANDRA-8553?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=14358392#comment-14358392
 ] 

Sylvain Lebresne commented on CASSANDRA-8553:
-

Well, it's not just going into {{QUERY}} messages, but at least {{EXECUTE}} and 
{{BATCH}} too. And probably {{PREPARE}} too, since it goes through the 
{{QueryHandler}} too. And really, the idea of this is to be as generic as 
possible, so at least in the spec, it's meant to be a frame-level thing, not 
something specific to any particular message really. So when I'm talking about 
an unused flag, I'm not referring to the query_parameters one, which would be 
inappropriate for this, but rather the frame-level one, i.e. the one described 
in section 2.2 of the spec. And we have plenty of space for this one.

bq. What does generic kv payload mean?

The equivalent of a map<string, blob> is what would make the most sense to me. 
Or, in the terms of the spec, it would be a:
{noformat}
[kv map]:  A [short] n, followed by n pairs <k><v> where <k> is a [string] and 
<v> is [bytes].
{noformat}
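
A minimal sketch (illustrative only, not the project's serializer) of that 
encoding, where [string] is a [short] length followed by UTF-8 bytes and 
[bytes] is an [int] length (-1 meaning null) followed by the data:
{code}
import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.Map;

public final class KvMapCodec
{
    public static byte[] encode(Map<String, byte[]> payload) throws IOException
    {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(bytes);
        out.writeShort(payload.size());                      // [short] n
        for (Map.Entry<String, byte[]> entry : payload.entrySet())
        {
            byte[] key = entry.getKey().getBytes(StandardCharsets.UTF_8);
            out.writeShort(key.length);                      // [string] = [short] length + utf8
            out.write(key);
            byte[] value = entry.getValue();
            out.writeInt(value == null ? -1 : value.length); // [bytes] = [int] length + data
            if (value != null)
                out.write(value);
        }
        return bytes.toByteArray();
    }
}
{code}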

 Add a key-value payload for third party usage
 -

 Key: CASSANDRA-8553
 URL: https://issues.apache.org/jira/browse/CASSANDRA-8553
 Project: Cassandra
  Issue Type: Sub-task
Reporter: Sergio Bossa
Assignee: Robert Stupp
  Labels: client-impacting, protocolv4
 Fix For: 3.0


 A useful improvement would be to include a generic key-value payload, so 
 that developers implementing a custom {{QueryHandler}} could leverage that to 
 move custom data back and forth.



--
This message was sent by Atlassian JIRA
(v6.3.4#6332)


[jira] [Commented] (CASSANDRA-8938) Full Row Scan does not count towards Reads

2015-03-12 Thread Phil Yang (JIRA)

[ 
https://issues.apache.org/jira/browse/CASSANDRA-8938?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=14358327#comment-14358327
 ] 

Phil Yang commented on CASSANDRA-8938:
--

The "readLatency" is a counter counting both read count and read latency 
for queries with a WHERE clause. I think the reason that nodetool only shows the 
count of readLatency rather than readLatency+rangeLatency is that it is usually 
not a good idea to select the whole table (because it will read all sstables in 
all nodes), so it is not commonly used and is usually zero. 

Does anyone else think the read count and read latency should add the counter 
of range reads?

Why do you think all sstables must be available for the next compaction after a 
range read? It helps little for range reads, and compaction will increase the 
load on all nodes.

 Full Row Scan does not count towards Reads
 --

 Key: CASSANDRA-8938
 URL: https://issues.apache.org/jira/browse/CASSANDRA-8938
 Project: Cassandra
  Issue Type: Bug
  Components: API, Core, Tools
 Environment: Unix, Cassandra 2.0.3
Reporter: Amit Singh Chowdhery
Priority: Minor
  Labels: none

 When a CQL SELECT statement is executed with a WHERE clause, Read Count is 
 incremented in cfstats of the column family. But when a full row scan is 
 done using a SELECT statement without a WHERE clause, Read Count is not 
 incremented. 
 Similarly, when using Size Tiered Compaction, if we do a full row scan using 
 Hector RangeslicesQuery, Read Count is not incremented in cfstats; Cassandra 
 still considers all sstables as cold and does not trigger compaction for 
 them. If we fire MultigetSliceQuery, Read Count is incremented and sstables 
 become hot, triggering compaction of these sstables. 
 Expected Behavior:
 1. Read Count must be incremented by number of rows read during a full row 
 scan done using CQL SELECT statement or Hector RangeslicesQuery.
 2. Size Tiered compaction must consider all sstables as Hot after a full row 
 scan.



--
This message was sent by Atlassian JIRA
(v6.3.4#6332)


[jira] [Commented] (CASSANDRA-8947) Cleanup Cell equality

2015-03-12 Thread Sylvain Lebresne (JIRA)

[ 
https://issues.apache.org/jira/browse/CASSANDRA-8947?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=14357049#comment-14357049
 ] 

Sylvain Lebresne commented on CASSANDRA-8947:
-

+1

(nit: I'd make the test run on both buffer and native cells, just because 
that's easily done)

 Cleanup Cell equality
 -

 Key: CASSANDRA-8947
 URL: https://issues.apache.org/jira/browse/CASSANDRA-8947
 Project: Cassandra
  Issue Type: Bug
  Components: Core
Reporter: Benedict
Assignee: Benedict
Priority: Minor
 Fix For: 2.1.4

 Attachments: cleanup_cell_equality.txt


 As pointed out by [~dbrosius], it is possible for AbstractCell to generate 
 false positive equality assertions if, e.g., a non-expiring cell is replaced 
 by an expiring equivalent. It's not clear to me if this would ever be a real 
 problem, since we would reconcile correctly, and as such equality would 
 unlikely ever be a problem (it's used rarely, only really for hash maps I 
 think (although auditing this is difficult) in which the hash code would also 
 be different, and by which point we should have resolved conflicts). It's 
 also a very small exposure profile, with either non-ttl'd writes mixed with 
 ttl'd, or a delete of a value representing the deletion timestamp.



--
This message was sent by Atlassian JIRA
(v6.3.4#6332)


[jira] [Commented] (CASSANDRA-8938) Full Row Scan does not count towards Reads

2015-03-12 Thread Phil Yang (JIRA)

[ 
https://issues.apache.org/jira/browse/CASSANDRA-8938?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=14358326#comment-14358326
 ] 

Phil Yang commented on CASSANDRA-8938:
--

The "readLatency" is a counter counting both read count and read latency 
for queries with a WHERE clause. I think the reason that nodetool only shows the 
count of readLatency rather than readLatency+rangeLatency is that it is usually 
not a good idea to select the whole table (because it will read all sstables in 
all nodes), so it is not commonly used and is usually zero. 

Does anyone else think the read count and read latency should add the counter 
of range reads?

Why do you think all sstables must be available for the next compaction after a 
range read? It helps little for range reads, and compaction will increase the 
load on all nodes.

 Full Row Scan does not count towards Reads
 --

 Key: CASSANDRA-8938
 URL: https://issues.apache.org/jira/browse/CASSANDRA-8938
 Project: Cassandra
  Issue Type: Bug
  Components: API, Core, Tools
 Environment: Unix, Cassandra 2.0.3
Reporter: Amit Singh Chowdhery
Priority: Minor
  Labels: none

 When a CQL SELECT statement is executed with a WHERE clause, Read Count is 
 incremented in cfstats of the column family. But when a full row scan is 
 done using a SELECT statement without a WHERE clause, Read Count is not 
 incremented. 
 Similarly, when using Size Tiered Compaction, if we do a full row scan using 
 Hector RangeslicesQuery, Read Count is not incremented in cfstats; Cassandra 
 still considers all sstables as cold and does not trigger compaction for 
 them. If we fire MultigetSliceQuery, Read Count is incremented and sstables 
 become hot, triggering compaction of these sstables. 
 Expected Behavior:
 1. Read Count must be incremented by number of rows read during a full row 
 scan done using CQL SELECT statement or Hector RangeslicesQuery.
 2. Size Tiered compaction must consider all sstables as Hot after a full row 
 scan.



--
This message was sent by Atlassian JIRA
(v6.3.4#6332)


[jira] [Created] (CASSANDRA-8948) consistency level (cl) parameter is not honoured in combination with user command

2015-03-12 Thread Andreas Flinck (JIRA)
Andreas Flinck created CASSANDRA-8948:
-

 Summary: consistency level (cl) parameter is not honoured in 
combination with user command
 Key: CASSANDRA-8948
 URL: https://issues.apache.org/jira/browse/CASSANDRA-8948
 Project: Cassandra
  Issue Type: Bug
  Components: Tools
Reporter: Andreas Flinck
Priority: Minor


The stress test tool does not honour the cl parameter when used in combination 
with the user command. The consistency level will be the default ONE no matter 
what is set by cl=.

Works fine with the write command.

How to reproduce:
1. Create a suitable yaml-file to use in test
2. Run e.g. {code}./cassandra-stress user profile=./file.yaml cl=ALL no-warmup 
duration=10s  ops\(insert=1\) -rate threads=4 -port jmx=7100{code}
3. Observe that cl=ONE in trace logs



--
This message was sent by Atlassian JIRA
(v6.3.4#6332)


[jira] [Issue Comment Deleted] (CASSANDRA-8938) Full Row Scan does not count towards Reads

2015-03-12 Thread Phil Yang (JIRA)

 [ 
https://issues.apache.org/jira/browse/CASSANDRA-8938?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Phil Yang updated CASSANDRA-8938:
-
Comment: was deleted

(was: The "readLatency" is a counter counting both read count and read 
latency for queries with a WHERE clause. I think the reason that nodetool only 
shows the count of readLatency rather than readLatency+rangeLatency is that it 
is usually not a good idea to select the whole table (because it will read 
all sstables in all nodes), so it is not commonly used and is usually zero. 

Does anyone else think the read count and read latency should add the counter 
of range reads?

Why do you think all sstables must be available for the next compaction after a 
range read? It helps little for range reads, and compaction will increase the 
load on all nodes.)

 Full Row Scan does not count towards Reads
 --

 Key: CASSANDRA-8938
 URL: https://issues.apache.org/jira/browse/CASSANDRA-8938
 Project: Cassandra
  Issue Type: Bug
  Components: API, Core, Tools
 Environment: Unix, Cassandra 2.0.3
Reporter: Amit Singh Chowdhery
Priority: Minor
  Labels: none

 When a CQL SELECT statement is executed with a WHERE clause, Read Count is 
 incremented in cfstats of the column family. But when a full row scan is 
 done using a SELECT statement without a WHERE clause, Read Count is not 
 incremented. 
 Similarly, when using Size Tiered Compaction, if we do a full row scan using 
 Hector RangeslicesQuery, Read Count is not incremented in cfstats; Cassandra 
 still considers all sstables as cold and does not trigger compaction for 
 them. If we fire MultigetSliceQuery, Read Count is incremented and sstables 
 become hot, triggering compaction of these sstables. 
 Expected Behavior:
 1. Read Count must be incremented by number of rows read during a full row 
 scan done using CQL SELECT statement or Hector RangeslicesQuery.
 2. Size Tiered compaction must consider all sstables as Hot after a full row 
 scan.



--
This message was sent by Atlassian JIRA
(v6.3.4#6332)


[jira] [Commented] (CASSANDRA-8553) Add a key-value payload for third party usage

2015-03-12 Thread Robert Stupp (JIRA)

[ 
https://issues.apache.org/jira/browse/CASSANDRA-8553?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=14358362#comment-14358362
 ] 

Robert Stupp commented on CASSANDRA-8553:
-

So this is going into the {{QUERY}} protocol message. There's only one bit left. I 
think it's not a good idea to use that last bit if we stick to 8 bits for the 
flags. I see three options here:
# Extend {{flags}} in {{QUERY}} to 16 bits
# Add a second flags-byte if {{0x80}} is set in {{flags}} in {{QUERY}} (so 
we effectively get 7 more flags, reserving {{0x80}} in the second byte)
# Add some kind of key-value pair list for decent query parameters if {{0x80}} 
is set in {{flags}} in {{QUERY}}

I propose option 2 - but would like to hear other opinions/proposals on this.

What does _generic kv payload_ mean? Just passing blobs, or passing a (string) 
key plus a typed (serialized) value?

 Add a key-value payload for third party usage
 -

 Key: CASSANDRA-8553
 URL: https://issues.apache.org/jira/browse/CASSANDRA-8553
 Project: Cassandra
  Issue Type: Sub-task
Reporter: Sergio Bossa
Assignee: Robert Stupp
  Labels: client-impacting, protocolv4
 Fix For: 3.0


 A useful improvement would be to include a generic key-value payload, so 
 that developers implementing a custom {{QueryHandler}} could leverage that to 
 move custom data back and forth.



--
This message was sent by Atlassian JIRA
(v6.3.4#6332)


[jira] [Commented] (CASSANDRA-5174) expose nodetool scrub for 2Is

2015-03-12 Thread Stefania (JIRA)

[ 
https://issues.apache.org/jira/browse/CASSANDRA-5174?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanelfocusedCommentId=14358410#comment-14358410
 ] 

Stefania commented on CASSANDRA-5174:
-

Hello [~jbellis], a few questions:
\\
# When scrubbing a non-index cf do we want to automatically scrub the 
associated secondary index cfs?
# When we detect corruption whilst scrubbing an index cf, should we abort and 
rebuild the index regardless of the skipCorrupted parameter?
# Do we block the nodetool client until the index is rebuilt in case of failure 
or do we let it run asynchronously?
# Do we need to support the standalone scrubber as well?
\\
\\

 expose nodetool scrub for 2Is
 -

 Key: CASSANDRA-5174
 URL: https://issues.apache.org/jira/browse/CASSANDRA-5174
 Project: Cassandra
  Issue Type: Task
  Components: Core, Tools
Reporter: Jason Brown
Assignee: Stefania
Priority: Minor
 Fix For: 3.0


 Continuation of CASSANDRA-4464, where many other nodetool operations were 
 added for 2Is. This ticket adds scrub for 2Is; it is in its own ticket due 
 to the riskiness of deleting data in the event of a bad bug.



--
This message was sent by Atlassian JIRA
(v6.3.4#6332)


[jira] [Commented] (CASSANDRA-8959) More efficient frozen UDT and tuple serialization format

2015-03-12 Thread Benedict (JIRA)

[ 
https://issues.apache.org/jira/browse/CASSANDRA-8959?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanelfocusedCommentId=14358508#comment-14358508
 ] 

Benedict commented on CASSANDRA-8959:
-

bq. and what Aleksey describes is pretty close to things that CASSANDRA-8099 
currently does as it happens

Yes, although a little closer to what I outlined in 7447, but they're all 
shades of the same coin. This was kind of my point - we should design an 
approach that we consider optimal, preferably abstract it, and use the same 
tool in each place, including here.

FTR, I think the encoding (for both) should include a flag in the row header 
that encodes the kind of encoding, which should itself be either a header 
bitmap for inclusion, a bitmap for exclusion, or a sequence of name ids 
(denormalised in the same manner as column names for a table), and this 
approach should be used for the table format as well. I think it should be 
based on the schema at the time, not the fields present in the table, so that 
this mapping can be interned.

 More efficient frozen UDT and tuple serialization format
 

 Key: CASSANDRA-8959
 URL: https://issues.apache.org/jira/browse/CASSANDRA-8959
 Project: Cassandra
  Issue Type: Improvement
Reporter: Aleksey Yeschenko
  Labels: performance
 Fix For: 3.1


 The current serialization format for UDTs has a fixed overhead of 4 bytes per 
 defined field (encoding the size of the field).
 It is inefficient for sparse UDTs - ones with many defined fields, but few of 
 them present. We could keep a bitset to indicate the missing fields, if any.
 It's sub-optimal for encoding UDTs with all the values present as well. We 
 could use varint encoding for the field sizes of blob/text fields and encode 
 'fixed'-size types directly, without the 4-byte size prologue.
 That or something more brilliant. Any improvement right now is low-hanging 
 fruit (lhf).
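
 As a rough illustration of the bitset-plus-varint idea, here is a sketch (the 
 class and the {{writeVInt}} helper are invented for illustration; this is not 
 the real tuple/UDT serializer):
{code}
import java.io.DataOutput;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.BitSet;

final class SparseUdtEncodingSketch
{
    // Hypothetical varint helper; a real implementation would use the
    // codebase's own vint primitives.
    static void writeVInt(DataOutput out, int v) throws IOException
    {
        while ((v & ~0x7F) != 0) { out.writeByte((v & 0x7F) | 0x80); v >>>= 7; }
        out.writeByte(v);
    }

    // A presence bitset, then each present field varint-length-prefixed.
    static void serialize(ByteBuffer[] fields, DataOutput out) throws IOException
    {
        BitSet present = new BitSet(fields.length);
        for (int i = 0; i < fields.length; i++)
            if (fields[i] != null)
                present.set(i);
        byte[] bits = present.toByteArray();
        out.write(bits, 0, bits.length);  // a real format would pad to ceil(n/8) bytes
        for (ByteBuffer field : fields)
        {
            if (field == null)
                continue;  // absent fields cost only their bit in the bitset
            writeVInt(out, field.remaining());  // varint instead of a fixed 4 bytes
            byte[] copy = new byte[field.remaining()];
            field.duplicate().get(copy);
            out.write(copy);
        }
    }
}
{code}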



--
This message was sent by Atlassian JIRA
(v6.3.4#6332)


[jira] [Commented] (CASSANDRA-8553) Add a key-value payload for third party usage

2015-03-12 Thread Robert Stupp (JIRA)

[ 
https://issues.apache.org/jira/browse/CASSANDRA-8553?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanelfocusedCommentId=14358405#comment-14358405
 ] 

Robert Stupp commented on CASSANDRA-8553:
-

Thanks for the explanation. Makes sense to me. So it's basically even easier 
on the coding side :)

 Add a key-value payload for third party usage
 -

 Key: CASSANDRA-8553
 URL: https://issues.apache.org/jira/browse/CASSANDRA-8553
 Project: Cassandra
  Issue Type: Sub-task
Reporter: Sergio Bossa
Assignee: Robert Stupp
  Labels: client-impacting, protocolv4
 Fix For: 3.0


 A useful improvement would be to include a generic key-value payload, so 
 that developers implementing a custom {{QueryHandler}} could leverage that to 
 move custom data back and forth.



--
This message was sent by Atlassian JIRA
(v6.3.4#6332)


[jira] [Commented] (CASSANDRA-8959) More efficient frozen UDT and tuple serialization format

2015-03-12 Thread Sylvain Lebresne (JIRA)

[ 
https://issues.apache.org/jira/browse/CASSANDRA-8959?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanelfocusedCommentId=14358494#comment-14358494
 ] 

Sylvain Lebresne commented on CASSANDRA-8959:
-

bq. Shouldn't this just be a replication of whatever strategy we choose for 
encoding tables?

Not entirely sure I understand what this refers to exactly. But if you mean 
that the more efficient encoding we use should try to be as close as possible 
to whatever we do in the sstable format, then I agree (and what Aleksey describes 
is pretty close to things that CASSANDRA-8099 currently does as it happens).

 More efficient frozen UDT and tuple serialization format
 

 Key: CASSANDRA-8959
 URL: https://issues.apache.org/jira/browse/CASSANDRA-8959
 Project: Cassandra
  Issue Type: Improvement
Reporter: Aleksey Yeschenko
  Labels: performance
 Fix For: 3.1


 The current serialization format for UDTs has a fixed overhead of 4 bytes per 
 defined field (encoding the size of the field).
 It is inefficient for sparse UDTs - ones with many defined fields, but few of 
 them present. We could keep a bitset to indicate the missing fields, if any.
 It's sub-optimal for encoding UDTs with all the values present as well. We 
 could use varint encoding for the field sizes of blob/text fields and encode 
 'fixed'-size types directly, without the 4-byte size prologue.
 That or something more brilliant. Any improvement right now is low-hanging 
 fruit (lhf).



--
This message was sent by Atlassian JIRA
(v6.3.4#6332)


[jira] [Commented] (CASSANDRA-8099) Refactor and modernize the storage engine

2015-03-12 Thread Benedict (JIRA)

[ 
https://issues.apache.org/jira/browse/CASSANDRA-8099?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanelfocusedCommentId=14358672#comment-14358672
 ] 

Benedict commented on CASSANDRA-8099:
-

I think it's a shame this patch wasn't attempted at least a little more 
incrementally. It looks to me as though changing the serialization formats, 
memtables and iterator implementations could have been done in separate 
patches at least; that might have made review safer and easier, and given us 
more time to digest the changes. I understand that this might have introduced 
some extra development burden, although that might have been recouped in 
many fewer days spent rebasing. An initial patch to change the 
abstractions would, I think, have helped to reduce the burden on the rest of the 
project, and perhaps helped parallelize the work as well.

I'm a little concerned that the result is that we won't give each of the pretty 
major decisions that have been made the time they need to be assessed properly, 
especially now we're ramping up for release (and hence low on time). I'm not 
necessarily suggesting we split it, as I can imagine that would be soul 
crushingly unpleasant for [~slebresne] and introduce a delay, but I am 
generally ill at ease with the scope of the changes and our ability to vet 
them. I'm also worried that I find myself saying "too close to release to 
question this decision" - which seems a problematic mode to be merging any 
patch under.

 Refactor and modernize the storage engine
 -

 Key: CASSANDRA-8099
 URL: https://issues.apache.org/jira/browse/CASSANDRA-8099
 Project: Cassandra
  Issue Type: Improvement
Reporter: Sylvain Lebresne
Assignee: Sylvain Lebresne
 Fix For: 3.0

 Attachments: 8099-nit


 The current storage engine (which for this ticket I'll loosely define as the 
 code implementing the read/write path) is suffering from old age. One of the 
 main problems is that the only structure it deals with is the cell, which 
 completely ignores the higher-level CQL structure that groups cells into 
 (CQL) rows.
 This leads to many inefficiencies, like the fact that during a read we have 
 to group cells multiple times (to count on the replica, then to count on the 
 coordinator, then to produce the CQL resultset) because we forget about the 
 grouping right away each time (so lots of useless cell name comparisons in 
 particular). But beyond inefficiencies, having to manually recreate the CQL 
 structure every time we need it for something is hindering new features and 
 makes the code more complex than it should be.
 Said storage engine also has tons of technical debt. To pick an example, the 
 fact that during range queries we update {{SliceQueryFilter.count}} is pretty 
 hacky and error prone. So are the overly complex lengths {{AbstractQueryPager}} 
 has to go to simply to remove the last query result.
 So I want to bite the bullet and modernize this storage engine. I propose to 
 do 2 main things:
 # Make the storage engine more aware of the CQL structure. In practice, 
 instead of having partitions be a simple iterable map of cells, they should be 
 an iterable list of rows (each being itself composed of per-column cells, 
 though obviously not exactly the same kind of cell we have today).
 # Make the engine more iterative. What I mean here is that in the read path, 
 we end up reading all cells into memory (we put them in a ColumnFamily 
 object), but there is really no reason to. If instead we were working with 
 iterators all the way through, we could get to a point where we're basically 
 transferring data from disk to the network, and we should be able to reduce 
 GC substantially (a toy sketch of this shape follows below).
 Please note that such a refactor should provide some performance improvements 
 right off the bat, but that's not its primary goal. Its primary goal is 
 to simplify the storage engine and add abstractions that are better suited to 
 further optimizations.
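
 To make point 2 concrete, here is a toy sketch of an iterator-based pipeline 
 (purely illustrative: {{Row}} and the filtering stage are invented for the 
 sketch and are not the patch's actual interfaces):
{code}
import java.util.Iterator;
import java.util.NoSuchElementException;
import java.util.function.Predicate;

final class RowPipeline
{
    interface Row {}  // stand-in for "a CQL row of per-column cells"

    // A lazy filtering stage: nothing is materialized, rows stream from the
    // source (ultimately disk) to the consumer one at a time.
    static Iterator<Row> filter(Iterator<Row> source, Predicate<Row> keep)
    {
        return new Iterator<Row>()
        {
            private Row next;

            public boolean hasNext()
            {
                while (next == null && source.hasNext())
                {
                    Row candidate = source.next();
                    if (keep.test(candidate))
                        next = candidate;
                }
                return next != null;
            }

            public Row next()
            {
                if (!hasNext())
                    throw new NoSuchElementException();
                Row row = next;
                next = null;
                return row;
            }
        };
    }
}
{code}
 Stages built this way compose without ever buffering a whole partition, which 
 is what would let reads stream from disk to the network.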



--
This message was sent by Atlassian JIRA
(v6.3.4#6332)


[jira] [Commented] (CASSANDRA-8950) NullPointerException in nodetool getendpoints with non-existent keyspace or table

2015-03-12 Thread Stefania (JIRA)

[ 
https://issues.apache.org/jira/browse/CASSANDRA-8950?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanelfocusedCommentId=14359843#comment-14359843
 ] 

Stefania commented on CASSANDRA-8950:
-

To print out a message, an {{IllegalArgumentException}} does the job in 2.1. In 
2.0, however, I also had to handle the exception. For this reason I separated 
the patches:

https://github.com/stef1927/cassandra/tree/8950-2.0
https://github.com/stef1927/cassandra/tree/8950-2.1

Do you want something more generic for 2.0? It's a bit of a hack to handle the 
{{IllegalArgumentException}} just for one case. Otherwise we could simply not 
catch the exception, but we'd have to move the check outside of JMX to keep the 
exception smaller (just like cfstats).

 NullPointerException in nodetool getendpoints with non-existent keyspace or 
 table
 -

 Key: CASSANDRA-8950
 URL: https://issues.apache.org/jira/browse/CASSANDRA-8950
 Project: Cassandra
  Issue Type: Bug
  Components: Tools
Reporter: Tyler Hobbs
Assignee: Stefania
Priority: Minor
 Fix For: 2.1.4, 2.0.14


 If {{nodetool getendpoints}} is run with a non-existent keyspace or table, 
 a NullPointerException will occur:
 {noformat}
 ~/cassandra $ bin/nodetool getendpoints badkeyspace badtable mykey
 error: null
 -- StackTrace --
 java.lang.NullPointerException
   at 
 org.apache.cassandra.service.StorageService.getNaturalEndpoints(StorageService.java:2914)
   at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
   at 
 sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
   at 
 sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
   at java.lang.reflect.Method.invoke(Method.java:606)
   at sun.reflect.misc.Trampoline.invoke(MethodUtil.java:75)
   at sun.reflect.GeneratedMethodAccessor3.invoke(Unknown Source)
   at 
 sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
   at java.lang.reflect.Method.invoke(Method.java:606)
   at sun.reflect.misc.MethodUtil.invoke(MethodUtil.java:279)
   at 
 com.sun.jmx.mbeanserver.StandardMBeanIntrospector.invokeM2(StandardMBeanIntrospector.java:112)
   at 
 com.sun.jmx.mbeanserver.StandardMBeanIntrospector.invokeM2(StandardMBeanIntrospector.java:46)
   at 
 com.sun.jmx.mbeanserver.MBeanIntrospector.invokeM(MBeanIntrospector.java:237)
   at com.sun.jmx.mbeanserver.PerInterface.invoke(PerInterface.java:138)
   at com.sun.jmx.mbeanserver.MBeanSupport.invoke(MBeanSupport.java:252)
   at 
 com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.invoke(DefaultMBeanServerInterceptor.java:819)
   at 
 com.sun.jmx.mbeanserver.JmxMBeanServer.invoke(JmxMBeanServer.java:801)
   at 
 javax.management.remote.rmi.RMIConnectionImpl.doOperation(RMIConnectionImpl.java:1487)
   at 
 javax.management.remote.rmi.RMIConnectionImpl.access$300(RMIConnectionImpl.java:97)
   at 
 javax.management.remote.rmi.RMIConnectionImpl$PrivilegedOperation.run(RMIConnectionImpl.java:1328)
   at 
 javax.management.remote.rmi.RMIConnectionImpl.doPrivilegedOperation(RMIConnectionImpl.java:1420)
   at 
 javax.management.remote.rmi.RMIConnectionImpl.invoke(RMIConnectionImpl.java:848)
   at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
   at 
 sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
   at 
 sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
   at java.lang.reflect.Method.invoke(Method.java:606)
   at sun.rmi.server.UnicastServerRef.dispatch(UnicastServerRef.java:322)
   at sun.rmi.transport.Transport$1.run(Transport.java:177)
   at sun.rmi.transport.Transport$1.run(Transport.java:174)
   at java.security.AccessController.doPrivileged(Native Method)
   at sun.rmi.transport.Transport.serviceCall(Transport.java:173)
   at 
 sun.rmi.transport.tcp.TCPTransport.handleMessages(TCPTransport.java:556)
   at 
 sun.rmi.transport.tcp.TCPTransport$ConnectionHandler.run0(TCPTransport.java:811)
   at 
 sun.rmi.transport.tcp.TCPTransport$ConnectionHandler.run(TCPTransport.java:670)
   at 
 java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
   at 
 java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
   at java.lang.Thread.run(Thread.java:724)
 {noformat}



--
This message was sent by Atlassian JIRA
(v6.3.4#6332)


[jira] [Commented] (CASSANDRA-8838) Resumable bootstrap streaming

2015-03-12 Thread Stefania (JIRA)

[ 
https://issues.apache.org/jira/browse/CASSANDRA-8838?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanelfocusedCommentId=14359941#comment-14359941
 ] 

Stefania commented on CASSANDRA-8838:
-

Only one comment on the code: 
# we are not looking at the new flag when replacing; why is that?

Let's focus on the tests:

# Do you think we should add a unit test for {{resetAvailableRanges()}}, since 
it is the only code path added to {{SystemKeyspace}} that is not covered by 
{{StreamStoreStateTest}}?
# Not sure how easy it would be (we'd probably have to rework {{RangeStreamer}} 
a bit), but we could add a unit test to {{BootStrapperTest}} that checks that 
the {{RangeStreamer}} is not requesting available ranges (since this would be 
hard to check in dtest). Up to you if you want to do this or not.
# The dtest {{bootstrap_test.py}} is currently failing (not related to your 
patch as it is failing on trunk as well). See the failures I got below. 
## It would be good to fix these failures (not necessarily by you; perhaps 
someone in QA). 
## It would also be beneficial to add a couple more dtests to simulate a 
bootstrap failure and test the resume with and without 
{{cassandra.reset_bootstrap_progress}}. Not sure how easy it would be to 
simulate the failure; perhaps you'd have to add a cassandra.test flag.
## Perhaps we should have more tests for replacing a node (didn't see any in 
dtests) with and without a failed bootstrap.

What do you think: is this reasonable for testing, or did you do more manual 
tests? If you tested manually with ccm and nodetool, perhaps we should simply 
list the tests here and let QA add them to dtest or the new tool for functional 
tests that Ariel is working on.

Dtest failure on trunk:
{code}
==
FAIL: simple_bootstrap_test (bootstrap_test.TestBootstrap)
--
Traceback (most recent call last):
  File /home/stefania/git/cstar/cassandra-dtest/bootstrap_test.py, line 66, 
in simple_bootstrap_test
reader.check()
  File /home/stefania/git/cstar/cassandra-dtest/dtest.py, line 115, in check
raise self.__error
AssertionError: 
-------------------- >> begin captured logging << --------------------
dtest: DEBUG: cluster ccm directory: /tmp/dtest-510qhu
dtest: DEBUG: connecting...
dtest: DEBUG: reading...
--------------------- >> end captured logging << ---------------------
{code}

Dtest failure on 8838-2:
{code}
==
ERROR: simple_bootstrap_test (bootstrap_test.TestBootstrap)
--
Traceback (most recent call last):
  File /home/stefania/git/cstar/cassandra-dtest/bootstrap_test.py, line 74, 
in simple_bootstrap_test
assert_almost_equal(size1, size2, error=0.3)
  File /home/stefania/git/cstar/cassandra-dtest/assertions.py, line 56, in 
assert_almost_equal
assert vmin > vmax * (1.0 - error) or vmin == vmax, "values not within 
%.2f%% of the max: %s" % (error * 100, args)
TypeError: unsupported operand type(s) for *: 'Decimal' and 'float'
-------------------- >> begin captured logging << --------------------
dtest: DEBUG: cluster ccm directory: /tmp/dtest-4sMR0c
dtest: DEBUG: connecting...
dtest: DEBUG: reading...
dtest: DEBUG: initial_size: 482.41
dtest: DEBUG: size1: 305.65
dtest: DEBUG: size2: 287.5
--------------------- >> end captured logging << ---------------------
{code}

 Resumable bootstrap streaming
 -

 Key: CASSANDRA-8838
 URL: https://issues.apache.org/jira/browse/CASSANDRA-8838
 Project: Cassandra
  Issue Type: Sub-task
Reporter: Yuki Morishita
Assignee: Yuki Morishita
Priority: Minor
  Labels: dense-storage
 Fix For: 3.0


 This allows the bootstrapping node to avoid being streamed data it has 
 already received.
 The bootstrapping node records received keyspace/ranges as each stream session 
 completes. When some sessions with other nodes fail, the bootstrap fails as 
 well, but the next time the node re-bootstraps, already received 
 keyspace/ranges are skipped rather than streamed again.
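
 In other words, resuming reduces to set subtraction over ranges. A minimal 
 sketch of that shape (illustrative only; {{T}} stands in for Cassandra's 
 {{Range<Token>}}, and {{availableRanges}} for what the node records per 
 completed session):
{code}
import java.util.HashSet;
import java.util.Set;

final class ResumeSketch
{
    // On re-bootstrap, request only what a previous attempt did not stream.
    static <T> Set<T> rangesToFetch(Set<T> required, Set<T> availableRanges)
    {
        Set<T> toFetch = new HashSet<>(required);
        toFetch.removeAll(availableRanges);  // already-received ranges are skipped
        return toFetch;
    }
}
{code}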



--
This message was sent by Atlassian JIRA
(v6.3.4#6332)


[jira] [Updated] (CASSANDRA-4230) Deleting a CF always produces an error and that CF remains in an unknown state

2015-03-12 Thread Cristian Manuel Vertiz Fernandez (JIRA)

 [ 
https://issues.apache.org/jira/browse/CASSANDRA-4230?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Cristian Manuel Vertiz Fernandez updated CASSANDRA-4230:

Attachment: cassandraDEBUG.log

 Deleting a CF always produces an error and that CF remains in an unknown state
 --

 Key: CASSANDRA-4230
 URL: https://issues.apache.org/jira/browse/CASSANDRA-4230
 Project: Cassandra
  Issue Type: Bug
  Components: Core
Affects Versions: 1.1.0
 Environment: Debian Linux Squeeze with the cassandra debian package 
 from Apache.
Reporter: André Cruz
Assignee: Pavel Yaskevich
 Fix For: 1.1.1

 Attachments: 4230-v2.txt, CASSANDRA-4230.patch, cassandraDEBUG.log


 From the CLI perspective:
 [default@Disco] drop column family client; 
 null
 org.apache.thrift.transport.TTransportException
   at 
 org.apache.thrift.transport.TIOStreamTransport.read(TIOStreamTransport.java:132)
   at org.apache.thrift.transport.TTransport.readAll(TTransport.java:84)
   at 
 org.apache.thrift.transport.TFramedTransport.readFrame(TFramedTransport.java:129)
   at 
 org.apache.thrift.transport.TFramedTransport.read(TFramedTransport.java:101)
   at org.apache.thrift.transport.TTransport.readAll(TTransport.java:84)
   at 
 org.apache.thrift.protocol.TBinaryProtocol.readAll(TBinaryProtocol.java:378)
   at 
 org.apache.thrift.protocol.TBinaryProtocol.readI32(TBinaryProtocol.java:297)
   at 
 org.apache.thrift.protocol.TBinaryProtocol.readMessageBegin(TBinaryProtocol.java:204)
   at org.apache.thrift.TServiceClient.receiveBase(TServiceClient.java:69)
   at 
 org.apache.cassandra.thrift.Cassandra$Client.recv_system_drop_column_family(Cassandra.java:1222)
   at 
 org.apache.cassandra.thrift.Cassandra$Client.system_drop_column_family(Cassandra.java:1209)
   at 
 org.apache.cassandra.cli.CliClient.executeDelColumnFamily(CliClient.java:1301)
   at 
 org.apache.cassandra.cli.CliClient.executeCLIStatement(CliClient.java:234)
   at 
 org.apache.cassandra.cli.CliMain.processStatementInteractive(CliMain.java:219)
   at org.apache.cassandra.cli.CliMain.main(CliMain.java:346)
 Log:
  INFO [MigrationStage:1] 2012-05-09 11:25:35,686 ColumnFamilyStore.java (line 
 634) Enqueuing flush of Memtable-schema_columnfamilies@225225949(978/1222 
 serialized/live bytes, 21 ops)
  INFO [FlushWriter:3] 2012-05-09 11:25:35,687 Memtable.java (line 266) 
 Writing Memtable-schema_columnfamilies@225225949(978/1222 serialized/live 
 bytes, 21 ops)
  INFO [FlushWriter:3] 2012-05-09 11:25:35,748 Memtable.java (line 307) 
 Completed flushing 
 /var/lib/cassandra/data/system/schema_columnfamilies/system-schema_columnfamilies-hc-34-Data.db
  (1041 bytes)
  INFO [MigrationStage:1] 2012-05-09 11:25:35,749 ColumnFamilyStore.java (line 
 634) Enqueuing flush of Memtable-schema_columns@213209572(586/732 
 serialized/live bytes, 12 ops)
  INFO [FlushWriter:3] 2012-05-09 11:25:35,750 Memtable.java (line 266) 
 Writing Memtable-schema_columns@213209572(586/732 serialized/live bytes, 12 
 ops)
  INFO [FlushWriter:3] 2012-05-09 11:25:35,812 Memtable.java (line 307) 
 Completed flushing 
 /var/lib/cassandra/data/system/schema_columns/system-schema_columns-hc-28-Data.db
  (649 bytes)
  INFO [CompactionExecutor:20] 2012-05-09 11:25:35,814 CompactionTask.java 
 (line 114) Compacting 
 [SSTableReader(path='/var/lib/cassandra/data/system/schema_columns/system-schema_columns-hc-27-Data.db'),
  SSTableReader
 (path='/var/lib/cassandra/data/system/schema_columns/system-schema_columns-hc-25-Data.db'),
  
 SSTableReader(path='/var/lib/cassandra/data/system/schema_columns/system-schema_columns-hc-26-Data.db'),
  SSTableReader(path
 ='/var/lib/cassandra/data/system/schema_columns/system-schema_columns-hc-28-Data.db')]
  INFO [MigrationStage:1] 2012-05-09 11:25:35,918 ColumnFamilyStore.java (line 
 634) Enqueuing flush of Memtable-Client@864320066(372/465 serialized/live 
 bytes, 6 ops)
  INFO [FlushWriter:3] 2012-05-09 11:25:35,919 Memtable.java (line 266) 
 Writing Memtable-Client@864320066(372/465 serialized/live bytes, 6 ops)
  INFO [CompactionExecutor:20] 2012-05-09 11:25:35,945 CompactionTask.java 
 (line 225) Compacted to 
 [/var/lib/cassandra/data/system/schema_columns/system-schema_columns-hc-29-Data.db,].
   22,486 to 20,621 (~91% of orig
 inal) bytes for 2 keys at 0.150120MB/s.  Time: 131ms.
  INFO [FlushWriter:3] 2012-05-09 11:25:36,013 Memtable.java (line 307) 
 Completed flushing 
 /var/lib/cassandra/data/Disco/Client/Disco-Client-hc-5-Data.db (407 bytes)
 ERROR [MigrationStage:1] 2012-05-09 11:25:36,043 CLibrary.java (line 158) 
 Unable to create hard link
 com.sun.jna.LastErrorException: errno was 17
 at 

[jira] [Commented] (CASSANDRA-8950) NullPointerException in nodetool getendpoints with non-existent keyspace or table

2015-03-12 Thread Tyler Hobbs (JIRA)

[ 
https://issues.apache.org/jira/browse/CASSANDRA-8950?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanelfocusedCommentId=14358865#comment-14358865
 ] 

Tyler Hobbs commented on CASSANDRA-8950:


Sure, I can review.

Instead of not returning any endpoints, let's print a message about the 
keyspace (or table) not existing and return a non-zero exit status.  See the 
nodetool cfstats behavior in 2.1 for an example.
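
A rough sketch of that on the server side (a sketch only: the {{Schema}} and 
{{KSMetaData}} names are from the 2.1 era, and the exact placement is up to 
the patch):
{code}
// Validate up front so nodetool can print a message and exit non-zero
// instead of surfacing a NullPointerException from the endpoint lookup.
private static void validateKeyspaceAndTable(String keyspaceName, String cf)
{
    KSMetaData ksMetaData = Schema.instance.getKSMetaData(keyspaceName);
    if (ksMetaData == null)
        throw new IllegalArgumentException("The keyspace " + keyspaceName + " does not exist");
    if (ksMetaData.cfMetaData().get(cf) == null)
        throw new IllegalArgumentException("The table " + cf + " does not exist in keyspace " + keyspaceName);
}
{code}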

 NullPointerException in nodetool getendpoints with non-existent keyspace or 
 table
 -

 Key: CASSANDRA-8950
 URL: https://issues.apache.org/jira/browse/CASSANDRA-8950
 Project: Cassandra
  Issue Type: Bug
  Components: Tools
Reporter: Tyler Hobbs
Assignee: Stefania
Priority: Minor
 Fix For: 2.1.4, 2.0.14


 If {{nodetool getendpoints}} is run with a non-existent keyspace or table, 
 a NullPointerException will occur:
 {noformat}
 ~/cassandra $ bin/nodetool getendpoints badkeyspace badtable mykey
 error: null
 -- StackTrace --
 java.lang.NullPointerException
   at 
 org.apache.cassandra.service.StorageService.getNaturalEndpoints(StorageService.java:2914)
   at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
   at 
 sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
   at 
 sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
   at java.lang.reflect.Method.invoke(Method.java:606)
   at sun.reflect.misc.Trampoline.invoke(MethodUtil.java:75)
   at sun.reflect.GeneratedMethodAccessor3.invoke(Unknown Source)
   at 
 sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
   at java.lang.reflect.Method.invoke(Method.java:606)
   at sun.reflect.misc.MethodUtil.invoke(MethodUtil.java:279)
   at 
 com.sun.jmx.mbeanserver.StandardMBeanIntrospector.invokeM2(StandardMBeanIntrospector.java:112)
   at 
 com.sun.jmx.mbeanserver.StandardMBeanIntrospector.invokeM2(StandardMBeanIntrospector.java:46)
   at 
 com.sun.jmx.mbeanserver.MBeanIntrospector.invokeM(MBeanIntrospector.java:237)
   at com.sun.jmx.mbeanserver.PerInterface.invoke(PerInterface.java:138)
   at com.sun.jmx.mbeanserver.MBeanSupport.invoke(MBeanSupport.java:252)
   at 
 com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.invoke(DefaultMBeanServerInterceptor.java:819)
   at 
 com.sun.jmx.mbeanserver.JmxMBeanServer.invoke(JmxMBeanServer.java:801)
   at 
 javax.management.remote.rmi.RMIConnectionImpl.doOperation(RMIConnectionImpl.java:1487)
   at 
 javax.management.remote.rmi.RMIConnectionImpl.access$300(RMIConnectionImpl.java:97)
   at 
 javax.management.remote.rmi.RMIConnectionImpl$PrivilegedOperation.run(RMIConnectionImpl.java:1328)
   at 
 javax.management.remote.rmi.RMIConnectionImpl.doPrivilegedOperation(RMIConnectionImpl.java:1420)
   at 
 javax.management.remote.rmi.RMIConnectionImpl.invoke(RMIConnectionImpl.java:848)
   at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
   at 
 sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
   at 
 sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
   at java.lang.reflect.Method.invoke(Method.java:606)
   at sun.rmi.server.UnicastServerRef.dispatch(UnicastServerRef.java:322)
   at sun.rmi.transport.Transport$1.run(Transport.java:177)
   at sun.rmi.transport.Transport$1.run(Transport.java:174)
   at java.security.AccessController.doPrivileged(Native Method)
   at sun.rmi.transport.Transport.serviceCall(Transport.java:173)
   at 
 sun.rmi.transport.tcp.TCPTransport.handleMessages(TCPTransport.java:556)
   at 
 sun.rmi.transport.tcp.TCPTransport$ConnectionHandler.run0(TCPTransport.java:811)
   at 
 sun.rmi.transport.tcp.TCPTransport$ConnectionHandler.run(TCPTransport.java:670)
   at 
 java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
   at 
 java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
   at java.lang.Thread.run(Thread.java:724)
 {noformat}



--
This message was sent by Atlassian JIRA
(v6.3.4#6332)


[jira] [Updated] (CASSANDRA-8149) bump metrics-reporter-config dependency

2015-03-12 Thread Philip Thompson (JIRA)

 [ 
https://issues.apache.org/jira/browse/CASSANDRA-8149?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Philip Thompson updated CASSANDRA-8149:
---
Tester: Philip Thompson

 bump metrics-reporter-config dependency 
 

 Key: CASSANDRA-8149
 URL: https://issues.apache.org/jira/browse/CASSANDRA-8149
 Project: Cassandra
  Issue Type: Improvement
Reporter: Pierre-Yves Ritschard
Assignee: T Jake Luciani
 Fix For: 3.0


 It would be nice to be able to take advantage of the new reporters available 
 in
 metrics-reporter-config 2.3.1 which is now available on maven central.
 If my understanding is correct, this only entails bumping the dependency in 
 build.xml.



--
This message was sent by Atlassian JIRA
(v6.3.4#6332)


[jira] [Commented] (CASSANDRA-8670) Large columns + NIO memory pooling causes excessive direct memory usage

2015-03-12 Thread Ariel Weisberg (JIRA)

[ 
https://issues.apache.org/jira/browse/CASSANDRA-8670?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanelfocusedCommentId=14358866#comment-14358866
 ] 

Ariel Weisberg commented on CASSANDRA-8670:
---

I am getting to this now. It should be fixed in 3.0. Once I have it fixed for 
3.0 we can decide about backporting to 2.1.

 Large columns + NIO memory pooling causes excessive direct memory usage
 ---

 Key: CASSANDRA-8670
 URL: https://issues.apache.org/jira/browse/CASSANDRA-8670
 Project: Cassandra
  Issue Type: Bug
  Components: Core
Reporter: Ariel Weisberg
Assignee: Ariel Weisberg
 Fix For: 3.0


 If you provide a large byte array to NIO and ask it to populate the byte 
 array from a socket, it will allocate a thread-local byte buffer the 
 size of the requested read, no matter how large it is. Old IO wraps new IO for 
 sockets (but not files), so old IO is affected as well.
 Even if you are using Buffered{Input | Output}Stream you can end up passing a 
 large byte array to NIO. The byte array read method will pass the array to 
 NIO directly if it is larger than the internal buffer.  
 Passing large cells between nodes as part of intra-cluster messaging can 
 cause the NIO pooled buffers to quickly reach a high watermark and stay 
 there. This ends up costing 2x the largest cell size because there is a 
 buffer for input and output since they are different threads. This is further 
 multiplied by the number of nodes in the cluster - 1 since each has a 
 dedicated thread pair with separate thread locals.
 Anecdotally it appears that the cost is doubled beyond that although it isn't 
 clear why. Possibly the control connections or possibly there is some way in 
 which multiple 
 Need a workload in CI that tests the advertised limits of cells on a cluster. 
 It would be reasonable to ratchet down the max direct memory for the test to 
 trigger failures if a memory pooling issue is introduced. I don't think we 
 need to test concurrently pulling in a lot of them, but it should at least 
 work serially.
 The obvious fix to address this issue would be to read in smaller chunks when 
 dealing with large values. I think "small" should still be relatively large (4 
 megabytes) so that code that is reading from a disk can amortize the cost of 
 a seek. It can be hard to tell what the underlying thing being read from is 
 going to be in some of the contexts where we might choose to implement 
 switching to reading chunks.
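
 As an illustration of the chunking idea (a sketch under the 4 MB assumption 
 above, not the actual fix):
{code}
import java.io.EOFException;
import java.io.IOException;
import java.io.InputStream;

final class ChunkedReads
{
    // Cap the size of any single read handed to the stream (and hence to NIO)
    // so its per-thread direct buffer stays bounded.
    private static final int CHUNK = 4 << 20;  // 4 MB

    static void readFully(InputStream in, byte[] dest) throws IOException
    {
        int offset = 0;
        while (offset < dest.length)
        {
            int read = in.read(dest, offset, Math.min(CHUNK, dest.length - offset));
            if (read < 0)
                throw new EOFException("EOF after " + offset + " of " + dest.length + " bytes");
            offset += read;
        }
    }
}
{code}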



--
This message was sent by Atlassian JIRA
(v6.3.4#6332)


[jira] [Commented] (CASSANDRA-5174) expose nodetool scrub for 2Is

2015-03-12 Thread Jonathan Ellis (JIRA)

[ 
https://issues.apache.org/jira/browse/CASSANDRA-5174?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanelfocusedCommentId=14358836#comment-14358836
 ] 

Jonathan Ellis commented on CASSANDRA-5174:
---

# I think we want to make those separate operations.  The most common use case 
for scrub is "I saw a query error out because of a corrupt sstable", and I 
think we want scrub to touch only the sstables necessary to fix that.
# skipCorrupted only applies to counters, which can't be indexed, so this 
shouldn't be an issue
# Follow existing practice (I believe scrub and rebuild_index both block)
# For local indexes, we should support standalone mode

 expose nodetool scrub for 2Is
 -

 Key: CASSANDRA-5174
 URL: https://issues.apache.org/jira/browse/CASSANDRA-5174
 Project: Cassandra
  Issue Type: Task
  Components: Core, Tools
Reporter: Jason Brown
Assignee: Stefania
Priority: Minor
 Fix For: 3.0


 Continuation of CASSANDRA-4464, where many other nodetool operations were 
 added for 2Is. This ticket adds scrub for 2Is; it is in its own ticket due 
 to the riskiness of deleting data in the event of a bad bug.



--
This message was sent by Atlassian JIRA
(v6.3.4#6332)


[jira] [Created] (CASSANDRA-8961) Data rewrite case causes almost non-functional compaction

2015-03-12 Thread Dan Kinder (JIRA)
Dan Kinder created CASSANDRA-8961:
-

 Summary: Data rewrite case causes almost non-functional compaction
 Key: CASSANDRA-8961
 URL: https://issues.apache.org/jira/browse/CASSANDRA-8961
 Project: Cassandra
  Issue Type: Bug
 Environment: Centos 6.6, Cassandra 2.0.12 (Also seen in Cassandra 2.1)
Reporter: Dan Kinder
Priority: Minor


There seems to be a bug of some kind where compaction grinds to a halt in this 
use case: from time to time we have a set of rows we need to migrate, 
changing their primary key by deleting the row and inserting a new row with the 
same partition key and a different clustering key. The python script below 
demonstrates this; it takes a bit of time to run (I didn't try to optimize it), 
but when it's done it will be trying to compact a few hundred megs of data for 
a long time... on the order of days, or it will never finish.

Not verified by this sandboxed experiment, but it seems that compression 
settings do not matter and that this happens with STCS as well, not just 
LCS. I am still testing whether other patterns cause this terrible compaction 
performance, like deleting all rows and then inserting, or vice versa.

Even if it isn't a bug per se, is there a way to fix or work around this 
behavior?

{code}
import string
import random
from cassandra.cluster import Cluster

cluster = Cluster(['localhost'])
db = cluster.connect('walker')

db.execute("DROP KEYSPACE IF EXISTS trial")
db.execute("""CREATE KEYSPACE trial
  WITH REPLICATION = { 'class': 'SimpleStrategy',
'replication_factor': 1 }""")
db.execute("""CREATE TABLE trial.tbl (
pk text,
data text,
PRIMARY KEY(pk, data)
  ) WITH compaction = { 'class' : 'LeveledCompactionStrategy' }
AND compression = {'sstable_compression': ''}""")

# Number of rows to insert and move
n = 20

# Insert n rows with the same partition key, 1KB of unique data in cluster key
for i in range(n):
    db.execute("INSERT INTO trial.tbl (pk, data) VALUES ('thepk', %s)",
        [str(i).zfill(1024)])

# Update those n rows, deleting each and replacing with a very similar row
for i in range(n):
    val = str(i).zfill(1024)
    db.execute("DELETE FROM trial.tbl WHERE pk = 'thepk' AND data = %s", [val])
    db.execute("INSERT INTO trial.tbl (pk, data) VALUES ('thepk', %s)", ["1" + val])
{code}



--
This message was sent by Atlassian JIRA
(v6.3.4#6332)


[jira] [Commented] (CASSANDRA-8839) DatabaseDescriptor throws NPE when rpc_interface is used

2015-03-12 Thread Ariel Weisberg (JIRA)

[ 
https://issues.apache.org/jira/browse/CASSANDRA-8839?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanelfocusedCommentId=14358862#comment-14358862
 ] 

Ariel Weisberg commented on CASSANDRA-8839:
---

I added a test and fixed the bug on 2.1. On OS X and my Linux desktop the 
interfaces all have ipv4 and ipv6 addresses and the code expects that 
interfaces will only have one address. I addressed this by modifying the code 
to accept interfaces with multiple addresses.

There are two new config options in cassandra.yaml: rpc_interface_prefer_ipv6 
and listen_interface_prefer_ipv6, both defaulting to false. The code for 
selecting an address will now look at prefer_ipv6 and select the first address 
of the preferred kind listed by the interface. If no address of the preferred 
kind is available, then the first available address is selected.

Doc for this is going to be necessary, and we need to decide if this behavior 
for selecting addresses is the right way to go.
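
A minimal sketch of the selection rule described above (illustrative only, not 
the patch itself):
{code}
import java.net.Inet4Address;
import java.net.Inet6Address;
import java.net.InetAddress;
import java.net.NetworkInterface;
import java.net.SocketException;
import java.util.Enumeration;

final class InterfaceAddressSelection
{
    // Prefer the first address of the configured kind; otherwise fall back
    // to the first address the interface lists.
    static InetAddress select(String name, boolean preferIpv6) throws SocketException
    {
        NetworkInterface ni = NetworkInterface.getByName(name);
        if (ni == null)
            throw new SocketException("Unknown interface " + name);
        InetAddress fallback = null;
        for (Enumeration<InetAddress> e = ni.getInetAddresses(); e.hasMoreElements(); )
        {
            InetAddress addr = e.nextElement();
            if (fallback == null)
                fallback = addr;
            if (preferIpv6 ? addr instanceof Inet6Address : addr instanceof Inet4Address)
                return addr;
        }
        return fallback;  // no address of the preferred kind was listed
    }
}
{code}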

trunk
https://github.com/apache/cassandra/compare/trunk...aweisberg:C-8839

2.1
https://github.com/apache/cassandra/compare/cassandra-2.1...aweisberg:C-8839-2.1

 DatabaseDescriptor throws NPE when rpc_interface is used
 

 Key: CASSANDRA-8839
 URL: https://issues.apache.org/jira/browse/CASSANDRA-8839
 Project: Cassandra
  Issue Type: Bug
  Components: Config
 Environment: 2.1.3
Reporter: Jan Kesten
Assignee: Ariel Weisberg
 Fix For: 2.1.4


 Copied from a mail to the dev mailing list. 
 When using
 - listen_interface instead of listen_address
 - rpc_interface instead of rpc_address
 starting 2.1.3 throws an NPE:
 {code}
 ERROR [main] 2015-02-20 07:50:09,661 DatabaseDescriptor.java:144 - Fatal 
 error during configuration loading
 java.lang.NullPointerException: null
 at 
 org.apache.cassandra.config.DatabaseDescriptor.applyConfig(DatabaseDescriptor.java:411)
  ~[apache-cassandra-2.1.3.jar:2.1.3]
 at 
 org.apache.cassandra.config.DatabaseDescriptor.clinit(DatabaseDescriptor.java:133)
  ~[apache-cassandra-2.1.3.jar:2.1.3]
 at 
 org.apache.cassandra.service.CassandraDaemon.setup(CassandraDaemon.java:110) 
 [apache-cassandra-2.1.3.jar:2.1.3]
 at 
 org.apache.cassandra.service.CassandraDaemon.activate(CassandraDaemon.java:465)
  [apache-cassandra-2.1.3.jar:2.1.3]
 at 
 org.apache.cassandra.service.CassandraDaemon.main(CassandraDaemon.java:554) 
 [apache-cassandra-2.1.3.jar:2.1.3]
 {code}
 Occurs on debian package as well as in tar.gz distribution. 
 {code}
 /* Local IP, hostname or interface to bind RPC server to */
 if (conf.rpc_address != null && conf.rpc_interface != null)
 {
     throw new ConfigurationException("Set rpc_address OR rpc_interface, not both");
 }
 else if (conf.rpc_address != null)
 {
     try
     {
         rpcAddress = InetAddress.getByName(conf.rpc_address);
     }
     catch (UnknownHostException e)
     {
         throw new ConfigurationException("Unknown host in rpc_address " + conf.rpc_address);
     }
 }
 else if (conf.rpc_interface != null)
 {
     listenAddress = getNetworkInterfaceAddress(conf.rpc_interface, "rpc_interface");
 }
 else
 {
     rpcAddress = FBUtilities.getLocalAddress();
 }
 {code}
 I think that listenAddress in the second else block is an error. In my case 
 rpc_interface is eth0, so listenAddress gets set and rpcAddress remains 
 unset. The result is an NPE at line 411:
 {code}
 if (rpcAddress.isAnyLocalAddress())
 {code}
 After changing rpc_interface to rpc_address everything works as expected.



--
This message was sent by Atlassian JIRA
(v6.3.4#6332)


[jira] [Commented] (CASSANDRA-4230) Deleting a CF always produces an error and that CF remains in an unknown state

2015-03-12 Thread Cristian Manuel Vertiz Fernandez (JIRA)

[ 
https://issues.apache.org/jira/browse/CASSANDRA-4230?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanelfocusedCommentId=14358812#comment-14358812
 ] 

Cristian Manuel Vertiz Fernandez commented on CASSANDRA-4230:
-

Note: issue reproduced on Cassandra 2.0.12 (DataStax for Windows).
Log: see the attached cassandraDEBUG.log file.

 Deleting a CF always produces an error and that CF remains in an unknown state
 --

 Key: CASSANDRA-4230
 URL: https://issues.apache.org/jira/browse/CASSANDRA-4230
 Project: Cassandra
  Issue Type: Bug
  Components: Core
Affects Versions: 1.1.0
 Environment: Debian Linux Squeeze with the cassandra debian package 
 from Apache.
Reporter: André Cruz
Assignee: Pavel Yaskevich
 Fix For: 1.1.1

 Attachments: 4230-v2.txt, CASSANDRA-4230.patch, cassandraDEBUG.log


 From the CLI perspective:
 [default@Disco] drop column family client; 
 null
 org.apache.thrift.transport.TTransportException
   at 
 org.apache.thrift.transport.TIOStreamTransport.read(TIOStreamTransport.java:132)
   at org.apache.thrift.transport.TTransport.readAll(TTransport.java:84)
   at 
 org.apache.thrift.transport.TFramedTransport.readFrame(TFramedTransport.java:129)
   at 
 org.apache.thrift.transport.TFramedTransport.read(TFramedTransport.java:101)
   at org.apache.thrift.transport.TTransport.readAll(TTransport.java:84)
   at 
 org.apache.thrift.protocol.TBinaryProtocol.readAll(TBinaryProtocol.java:378)
   at 
 org.apache.thrift.protocol.TBinaryProtocol.readI32(TBinaryProtocol.java:297)
   at 
 org.apache.thrift.protocol.TBinaryProtocol.readMessageBegin(TBinaryProtocol.java:204)
   at org.apache.thrift.TServiceClient.receiveBase(TServiceClient.java:69)
   at 
 org.apache.cassandra.thrift.Cassandra$Client.recv_system_drop_column_family(Cassandra.java:1222)
   at 
 org.apache.cassandra.thrift.Cassandra$Client.system_drop_column_family(Cassandra.java:1209)
   at 
 org.apache.cassandra.cli.CliClient.executeDelColumnFamily(CliClient.java:1301)
   at 
 org.apache.cassandra.cli.CliClient.executeCLIStatement(CliClient.java:234)
   at 
 org.apache.cassandra.cli.CliMain.processStatementInteractive(CliMain.java:219)
   at org.apache.cassandra.cli.CliMain.main(CliMain.java:346)
 Log:
  INFO [MigrationStage:1] 2012-05-09 11:25:35,686 ColumnFamilyStore.java (line 
 634) Enqueuing flush of Memtable-schema_columnfamilies@225225949(978/1222 
 serialized/live bytes, 21 ops)
  INFO [FlushWriter:3] 2012-05-09 11:25:35,687 Memtable.java (line 266) 
 Writing Memtable-schema_columnfamilies@225225949(978/1222 serialized/live 
 bytes, 21 ops)
  INFO [FlushWriter:3] 2012-05-09 11:25:35,748 Memtable.java (line 307) 
 Completed flushing 
 /var/lib/cassandra/data/system/schema_columnfamilies/system-schema_columnfamilies-hc-34-Data.db
  (1041 bytes)
  INFO [MigrationStage:1] 2012-05-09 11:25:35,749 ColumnFamilyStore.java (line 
 634) Enqueuing flush of Memtable-schema_columns@213209572(586/732 
 serialized/live bytes, 12 ops)
  INFO [FlushWriter:3] 2012-05-09 11:25:35,750 Memtable.java (line 266) 
 Writing Memtable-schema_columns@213209572(586/732 serialized/live bytes, 12 
 ops)
  INFO [FlushWriter:3] 2012-05-09 11:25:35,812 Memtable.java (line 307) 
 Completed flushing 
 /var/lib/cassandra/data/system/schema_columns/system-schema_columns-hc-28-Data.db
  (649 bytes)
  INFO [CompactionExecutor:20] 2012-05-09 11:25:35,814 CompactionTask.java 
 (line 114) Compacting 
 [SSTableReader(path='/var/lib/cassandra/data/system/schema_columns/system-schema_columns-hc-27-Data.db'),
  SSTableReader
 (path='/var/lib/cassandra/data/system/schema_columns/system-schema_columns-hc-25-Data.db'),
  
 SSTableReader(path='/var/lib/cassandra/data/system/schema_columns/system-schema_columns-hc-26-Data.db'),
  SSTableReader(path
 ='/var/lib/cassandra/data/system/schema_columns/system-schema_columns-hc-28-Data.db')]
  INFO [MigrationStage:1] 2012-05-09 11:25:35,918 ColumnFamilyStore.java (line 
 634) Enqueuing flush of Memtable-Client@864320066(372/465 serialized/live 
 bytes, 6 ops)
  INFO [FlushWriter:3] 2012-05-09 11:25:35,919 Memtable.java (line 266) 
 Writing Memtable-Client@864320066(372/465 serialized/live bytes, 6 ops)
  INFO [CompactionExecutor:20] 2012-05-09 11:25:35,945 CompactionTask.java 
 (line 225) Compacted to 
 [/var/lib/cassandra/data/system/schema_columns/system-schema_columns-hc-29-Data.db,].
   22,486 to 20,621 (~91% of orig
 inal) bytes for 2 keys at 0.150120MB/s.  Time: 131ms.
  INFO [FlushWriter:3] 2012-05-09 11:25:36,013 Memtable.java (line 307) 
 Completed flushing 
 /var/lib/cassandra/data/Disco/Client/Disco-Client-hc-5-Data.db (407 bytes)
 ERROR [MigrationStage:1] 2012-05-09 11:25:36,043 CLibrary.java (line 

[jira] [Updated] (CASSANDRA-8950) NullPointerException in nodetool getendpoints with non-existent keyspace or table

2015-03-12 Thread Tyler Hobbs (JIRA)

 [ 
https://issues.apache.org/jira/browse/CASSANDRA-8950?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Tyler Hobbs updated CASSANDRA-8950:
---
 Reviewer: Tyler Hobbs
Reproduced In: 2.1.3, 2.0.12  (was: 2.0.12, 2.1.3)

 NullPointerException in nodetool getendpoints with non-existent keyspace or 
 table
 -

 Key: CASSANDRA-8950
 URL: https://issues.apache.org/jira/browse/CASSANDRA-8950
 Project: Cassandra
  Issue Type: Bug
  Components: Tools
Reporter: Tyler Hobbs
Assignee: Stefania
Priority: Minor
 Fix For: 2.1.4, 2.0.14


 If {{nodetool getendpoints}} is run with a non-existent keyspace or table, 
 a NullPointerException will occur:
 {noformat}
 ~/cassandra $ bin/nodetool getendpoints badkeyspace badtable mykey
 error: null
 -- StackTrace --
 java.lang.NullPointerException
   at 
 org.apache.cassandra.service.StorageService.getNaturalEndpoints(StorageService.java:2914)
   at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
   at 
 sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
   at 
 sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
   at java.lang.reflect.Method.invoke(Method.java:606)
   at sun.reflect.misc.Trampoline.invoke(MethodUtil.java:75)
   at sun.reflect.GeneratedMethodAccessor3.invoke(Unknown Source)
   at 
 sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
   at java.lang.reflect.Method.invoke(Method.java:606)
   at sun.reflect.misc.MethodUtil.invoke(MethodUtil.java:279)
   at 
 com.sun.jmx.mbeanserver.StandardMBeanIntrospector.invokeM2(StandardMBeanIntrospector.java:112)
   at 
 com.sun.jmx.mbeanserver.StandardMBeanIntrospector.invokeM2(StandardMBeanIntrospector.java:46)
   at 
 com.sun.jmx.mbeanserver.MBeanIntrospector.invokeM(MBeanIntrospector.java:237)
   at com.sun.jmx.mbeanserver.PerInterface.invoke(PerInterface.java:138)
   at com.sun.jmx.mbeanserver.MBeanSupport.invoke(MBeanSupport.java:252)
   at 
 com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.invoke(DefaultMBeanServerInterceptor.java:819)
   at 
 com.sun.jmx.mbeanserver.JmxMBeanServer.invoke(JmxMBeanServer.java:801)
   at 
 javax.management.remote.rmi.RMIConnectionImpl.doOperation(RMIConnectionImpl.java:1487)
   at 
 javax.management.remote.rmi.RMIConnectionImpl.access$300(RMIConnectionImpl.java:97)
   at 
 javax.management.remote.rmi.RMIConnectionImpl$PrivilegedOperation.run(RMIConnectionImpl.java:1328)
   at 
 javax.management.remote.rmi.RMIConnectionImpl.doPrivilegedOperation(RMIConnectionImpl.java:1420)
   at 
 javax.management.remote.rmi.RMIConnectionImpl.invoke(RMIConnectionImpl.java:848)
   at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
   at 
 sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
   at 
 sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
   at java.lang.reflect.Method.invoke(Method.java:606)
   at sun.rmi.server.UnicastServerRef.dispatch(UnicastServerRef.java:322)
   at sun.rmi.transport.Transport$1.run(Transport.java:177)
   at sun.rmi.transport.Transport$1.run(Transport.java:174)
   at java.security.AccessController.doPrivileged(Native Method)
   at sun.rmi.transport.Transport.serviceCall(Transport.java:173)
   at 
 sun.rmi.transport.tcp.TCPTransport.handleMessages(TCPTransport.java:556)
   at 
 sun.rmi.transport.tcp.TCPTransport$ConnectionHandler.run0(TCPTransport.java:811)
   at 
 sun.rmi.transport.tcp.TCPTransport$ConnectionHandler.run(TCPTransport.java:670)
   at 
 java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
   at 
 java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
   at java.lang.Thread.run(Thread.java:724)
 {noformat}



--
This message was sent by Atlassian JIRA
(v6.3.4#6332)


cassandra git commit: Add WriteFailureException to native protocol

2015-03-12 Thread tylerhobbs
Repository: cassandra
Updated Branches:
  refs/heads/trunk e5d119aab - c059a5689


Add WriteFailureException to native protocol

Patch by Stefania Alborghetti; reviewed by Tyler Hobbs for
CASSANDRA-8592


Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo
Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/c059a568
Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/c059a568
Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/c059a568

Branch: refs/heads/trunk
Commit: c059a56890dd7b9aca0addca75a05bcce6b65a77
Parents: e5d119a
Author: Stefania Alborghetti stefania.alborghe...@datastax.com
Authored: Thu Mar 12 11:40:22 2015 -0500
Committer: Tyler Hobbs tylerho...@apache.org
Committed: Thu Mar 12 11:40:22 2015 -0500

--
 CHANGES.txt |   2 +
 doc/native_protocol_v4.spec |  32 +++-
 .../apache/cassandra/db/BatchlogManager.java|  36 +++--
 .../db/CounterMutationVerbHandler.java  |  40 ++---
 .../cassandra/db/HintedHandOffManager.java  |  12 +-
 src/java/org/apache/cassandra/db/Keyspace.java  |   6 +
 .../cassandra/db/MutationVerbHandler.java   |  15 +-
 .../cassandra/exceptions/ExceptionCode.java |   1 +
 .../exceptions/RequestFailureException.java |   2 +-
 .../exceptions/WriteFailureException.java   |  32 
 .../locator/AbstractReplicationStrategy.java|  12 +-
 .../org/apache/cassandra/net/CallbackInfo.java  |   6 +-
 .../org/apache/cassandra/net/IVerbHandler.java  |   4 +-
 .../cassandra/net/MessageDeliveryTask.java  |  37 +++--
 .../apache/cassandra/net/MessagingService.java  |   4 +-
 .../apache/cassandra/net/WriteCallbackInfo.java |   2 +-
 .../service/AbstractWriteResponseHandler.java   |  57 ++-
 .../DatacenterSyncWriteResponseHandler.java |   4 +-
 .../service/DatacenterWriteResponseHandler.java |  12 +-
 .../apache/cassandra/service/StorageProxy.java  | 157 ---
 .../cassandra/service/WriteResponseHandler.java |   4 +-
 .../transport/messages/ErrorMessage.java|  31 +++-
 .../apache/cassandra/db/SerializationsTest.java |   4 +-
 23 files changed, 354 insertions(+), 158 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/cassandra/blob/c059a568/CHANGES.txt
--
diff --git a/CHANGES.txt b/CHANGES.txt
index c67acd1..49f6358 100644
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@ -1,4 +1,6 @@
 3.0
+ * Add WriteFailureException to native protocol, notify coordinator of
+   write failures (CASSANDRA-8592)
  * Convert SequentialWriter to nio (CASSANDRA-8709)
  * Add role based access control (CASSANDRA-7653, 8650, 7216, 8760, 8849, 
8761, 8850)
  * Record client ip address in tracing sessions (CASSANDRA-8162)

http://git-wip-us.apache.org/repos/asf/cassandra/blob/c059a568/doc/native_protocol_v4.spec
--
diff --git a/doc/native_protocol_v4.spec b/doc/native_protocol_v4.spec
index 03a5a50..69adc17 100644
--- a/doc/native_protocol_v4.spec
+++ b/doc/native_protocol_v4.spec
@@ -1010,9 +1010,9 @@ Table of Contents
  - BATCH: the write was a (logged) batch write.
If this type is received, it means the batch log
has been successfully written (otherwise a
-   BATCH_LOG type would have been send instead).
+   BATCH_LOG type would have been sent instead).
  - UNLOGGED_BATCH: the write was an unlogged
-   batch. Not batch log write has been attempted.
+   batch. No batch log write has been attempted.
  - COUNTER: the write was a counter write
(batched or not).
  - BATCH_LOG: the timeout occured during the
@@ -1058,6 +1058,34 @@ Table of Contents
<keyspace> is the keyspace [string] of the failed function
<function> is the name [string] of the failed function
<arg_types> [string list] one string for each argument type 
(as CQL type) of the failed function
+0x1500    Write_failure: A non-timeout exception during a write request. The 
+  rest of the ERROR message body will be
+    <cl><received><blockfor><numfailures><write_type>
+  where:
+    <cl> is the [consistency] level of the query having triggered
+         the exception.
+    <received> is an [int] representing the number of nodes having
+         answered the request.
+    <blockfor> is the number of replicas whose response is
+         required to achieve <cl>.
+

[jira] [Commented] (CASSANDRA-4230) Deleting a CF always produces an error and that CF remains in an unknown state

2015-03-12 Thread Joshua McKenzie (JIRA)

[ 
https://issues.apache.org/jira/browse/CASSANDRA-4230?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanelfocusedCommentId=14359005#comment-14359005
 ] 

Joshua McKenzie commented on CASSANDRA-4230:


[~cvertiz]: this doesn't necessarily look like the same issue to me. Your 
attached logs don't show any errors symlinking for snapshots during compaction, 
though they do contain the same thrift error message:
{noformat}
DEBUG [Thrift:13] 2015-03-12 09:52:49,291 CustomTThreadPoolServer.java (line 
214) Thrift transport error occurred during processing of message.
 org.apache.thrift.transport.TTransportException
at 
org.apache.thrift.transport.TIOStreamTransport.read(TIOStreamTransport.java:132)
{noformat}

What behavior produced the errors you're seeing?

 Deleting a CF always produces an error and that CF remains in an unknown state
 --

 Key: CASSANDRA-4230
 URL: https://issues.apache.org/jira/browse/CASSANDRA-4230
 Project: Cassandra
  Issue Type: Bug
  Components: Core
Affects Versions: 1.1.0
 Environment: Debian Linux Squeeze with the cassandra debian package 
 from Apache.
Reporter: André Cruz
Assignee: Pavel Yaskevich
 Fix For: 1.1.1

 Attachments: 4230-v2.txt, CASSANDRA-4230.patch, cassandraDEBUG.log


 From the CLI perspective:
 [default@Disco] drop column family client; 
 null
 org.apache.thrift.transport.TTransportException
   at 
 org.apache.thrift.transport.TIOStreamTransport.read(TIOStreamTransport.java:132)
   at org.apache.thrift.transport.TTransport.readAll(TTransport.java:84)
   at 
 org.apache.thrift.transport.TFramedTransport.readFrame(TFramedTransport.java:129)
   at 
 org.apache.thrift.transport.TFramedTransport.read(TFramedTransport.java:101)
   at org.apache.thrift.transport.TTransport.readAll(TTransport.java:84)
   at 
 org.apache.thrift.protocol.TBinaryProtocol.readAll(TBinaryProtocol.java:378)
   at 
 org.apache.thrift.protocol.TBinaryProtocol.readI32(TBinaryProtocol.java:297)
   at 
 org.apache.thrift.protocol.TBinaryProtocol.readMessageBegin(TBinaryProtocol.java:204)
   at org.apache.thrift.TServiceClient.receiveBase(TServiceClient.java:69)
   at 
 org.apache.cassandra.thrift.Cassandra$Client.recv_system_drop_column_family(Cassandra.java:1222)
   at 
 org.apache.cassandra.thrift.Cassandra$Client.system_drop_column_family(Cassandra.java:1209)
   at 
 org.apache.cassandra.cli.CliClient.executeDelColumnFamily(CliClient.java:1301)
   at 
 org.apache.cassandra.cli.CliClient.executeCLIStatement(CliClient.java:234)
   at 
 org.apache.cassandra.cli.CliMain.processStatementInteractive(CliMain.java:219)
   at org.apache.cassandra.cli.CliMain.main(CliMain.java:346)
 Log:
  INFO [MigrationStage:1] 2012-05-09 11:25:35,686 ColumnFamilyStore.java (line 
 634) Enqueuing flush of Memtable-schema_columnfamilies@225225949(978/1222 
 serialized/live bytes, 21 ops)
  INFO [FlushWriter:3] 2012-05-09 11:25:35,687 Memtable.java (line 266) 
 Writing Memtable-schema_columnfamilies@225225949(978/1222 serialized/live 
 bytes, 21 ops)
  INFO [FlushWriter:3] 2012-05-09 11:25:35,748 Memtable.java (line 307) 
 Completed flushing 
 /var/lib/cassandra/data/system/schema_columnfamilies/system-schema_columnfamilies-hc-34-Data.db
  (1041 bytes)
  INFO [MigrationStage:1] 2012-05-09 11:25:35,749 ColumnFamilyStore.java (line 
 634) Enqueuing flush of Memtable-schema_columns@213209572(586/732 
 serialized/live bytes, 12 ops)
  INFO [FlushWriter:3] 2012-05-09 11:25:35,750 Memtable.java (line 266) 
 Writing Memtable-schema_columns@213209572(586/732 serialized/live bytes, 12 
 ops)
  INFO [FlushWriter:3] 2012-05-09 11:25:35,812 Memtable.java (line 307) 
 Completed flushing 
 /var/lib/cassandra/data/system/schema_columns/system-schema_columns-hc-28-Data.db
  (649 bytes)
  INFO [CompactionExecutor:20] 2012-05-09 11:25:35,814 CompactionTask.java 
 (line 114) Compacting 
 [SSTableReader(path='/var/lib/cassandra/data/system/schema_columns/system-schema_columns-hc-27-Data.db'),
  SSTableReader
 (path='/var/lib/cassandra/data/system/schema_columns/system-schema_columns-hc-25-Data.db'),
  
 SSTableReader(path='/var/lib/cassandra/data/system/schema_columns/system-schema_columns-hc-26-Data.db'),
  SSTableReader(path
 ='/var/lib/cassandra/data/system/schema_columns/system-schema_columns-hc-28-Data.db')]
  INFO [MigrationStage:1] 2012-05-09 11:25:35,918 ColumnFamilyStore.java (line 
 634) Enqueuing flush of Memtable-Client@864320066(372/465 serialized/live 
 bytes, 6 ops)
  INFO [FlushWriter:3] 2012-05-09 11:25:35,919 Memtable.java (line 266) 
 Writing Memtable-Client@864320066(372/465 serialized/live bytes, 6 ops)
  INFO [CompactionExecutor:20] 2012-05-09 11:25:35,945 CompactionTask.java 
 (line 225) Compacted to 
 

[jira] [Resolved] (CASSANDRA-8870) Tombstone overwhelming issue aborts client queries

2015-03-12 Thread Shawn Kumar (JIRA)

 [ 
https://issues.apache.org/jira/browse/CASSANDRA-8870?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Shawn Kumar resolved CASSANDRA-8870.

Resolution: Cannot Reproduce

 Tombstone overwhelming issue aborts client queries
 --

 Key: CASSANDRA-8870
 URL: https://issues.apache.org/jira/browse/CASSANDRA-8870
 Project: Cassandra
  Issue Type: Bug
 Environment: cassandra 2.1.2 ubunbtu 12.04
Reporter: Jeff Liu

 We are getting client query timeout issues from clients that are trying to 
 query data from the cassandra cluster. 
 Nodetool status shows that all nodes are still up regardless.
 Logs from client side:
 {noformat}
 com.datastax.driver.core.exceptions.NoHostAvailableException: All host(s) 
 tried for query failed (tried: 
 cass-chisel01.abc01.abc02.abc.abc.com/10.66.182.113:9042 
 (com.datastax.driver.core.TransportException: 
 [cass-chisel01.tgr01.iad02.testd.nestlabs.com/10.66.182.113:9042] Connection 
 has been closed))
 at 
 com.datastax.driver.core.RequestHandler.sendRequest(RequestHandler.java:108) 
 ~[com.datastax.cassandra.cassandra-driver-core-2.1.3.jar:na]
 at 
 com.datastax.driver.core.RequestHandler$1.run(RequestHandler.java:179) 
 ~[com.datastax.cassandra.cassandra-driver-core-2.1.3.jar:na]
 at 
 java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
  ~[na:1.7.0_55]
 at 
 java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
  ~[na:1.7.0_55]
 at java.lang.Thread.run(Thread.java:745) ~[na:1.7.0_55]
 {noformat}
 Logs from cassandra/system.log
 {noformat}
 ERROR [HintedHandoff:2] 2015-02-23 23:46:28,410 SliceQueryFilter.java:212 - 
 Scanned over 100000 tombstones in system.hints; query aborted (see 
 tombstone_failure_threshold)
 ERROR [HintedHandoff:2] 2015-02-23 23:46:28,417 CassandraDaemon.java:153 - 
 Exception in thread Thread[HintedHandoff:2,1,main]
 org.apache.cassandra.db.filter.TombstoneOverwhelmingException: null
 at 
 org.apache.cassandra.db.filter.SliceQueryFilter.collectReducedColumns(SliceQueryFilter.java:214)
  ~[apache-cassandra-2.1.2.jar:2.1.2]
 at 
 org.apache.cassandra.db.filter.QueryFilter.collateColumns(QueryFilter.java:107)
  ~[apache-cassandra-2.1.2.jar:2.1.2]
 at 
 org.apache.cassandra.db.filter.QueryFilter.collateOnDiskAtom(QueryFilter.java:81)
  ~[apache-cassandra-2.1.2.jar:2.1.2]
 at 
 org.apache.cassandra.db.filter.QueryFilter.collateOnDiskAtom(QueryFilter.java:69)
  ~[apache-cassandra-2.1.2.jar:2.1.2]
 at 
 org.apache.cassandra.db.CollationController.collectAllData(CollationController.java:310)
  ~[apache-cassandra-2.1.2.jar:2.1.2]
 at 
 org.apache.cassandra.db.CollationController.getTopLevelColumns(CollationController.java:60)
  ~[apache-cassandra-2.1.2.jar:2.1.2]
 at 
 org.apache.cassandra.db.ColumnFamilyStore.getTopLevelColumns(ColumnFamilyStore.java:1858)
  ~[apache-cassandra-2.1.2.jar:2.1.2]
 at 
 org.apache.cassandra.db.ColumnFamilyStore.getColumnFamily(ColumnFamilyStore.java:1666)
  ~[apache-cassandra-2.1.2.jar:2.1.2]
 at 
 org.apache.cassandra.db.HintedHandOffManager.doDeliverHintsToEndpoint(HintedHandOffManager.java:385)
  ~[apache-cassandra-2.1.2.jar:2.1.2]
 at 
 org.apache.cassandra.db.HintedHandOffManager.deliverHintsToEndpoint(HintedHandOffManager.java:344)
  ~[apache-cassandra-2.1.2.jar:2.1.2]
 at 
 org.apache.cassandra.db.HintedHandOffManager.access$400(HintedHandOffManager.java:94)
  ~[apache-cassandra-2.1.2.jar:2.1.2]
 at 
 org.apache.cassandra.db.HintedHandOffManager$5.run(HintedHandOffManager.java:555)
  ~[apache-cassandra-2.1.2.jar:2.1.2]
 at 
 java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
  ~[na:1.7.0_55]
 at 
 java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
  ~[na:1.7.0_55]
 at java.lang.Thread.run(Thread.java:745) ~[na:1.7.0_55]
 {noformat}



--
This message was sent by Atlassian JIRA
(v6.3.4#6332)


[jira] [Commented] (CASSANDRA-8909) Replication Strategy creation errors are lost in try/catch

2015-03-12 Thread Alan Boudreault (JIRA)

[ 
https://issues.apache.org/jira/browse/CASSANDRA-8909?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=14359023#comment-14359023
 ] 

Alan Boudreault commented on CASSANDRA-8909:


Thanks Aleksey!

 Replication Strategy creation errors are lost in try/catch
 --

 Key: CASSANDRA-8909
 URL: https://issues.apache.org/jira/browse/CASSANDRA-8909
 Project: Cassandra
  Issue Type: Improvement
Reporter: Alan Boudreault
Assignee: Alan Boudreault
Priority: Trivial
 Fix For: 2.1.4, 2.0.14

 Attachments: replication-strategy-exception-2.0.patch


 I was initially executing a bad cassandra-stress command  and was getting 
 this error:
 {code}
 Unable to create stress keyspace: Error constructing replication strategy 
 class
 {code}
 with the following command:
 {code}
 cassandra-stress -o insert --replication-strategy NetworkTopologyStrategy 
 --strategy-properties dc1:1,dc2:1 --replication-factor 1
 {code}
 After digging into the code, I noticed that the error displayed was not the 
 one thrown by the replication strategy code and that the try/catch block 
 could be improved. Basically, Constructor.newInstance can throw an 
 InvocationTargetException, whose cause provides a better error report.
 I think this improvement can also be done in 2.1 (not tested yet). If my 
 attached patch is acceptable, I will test and provide the right versions for 
 2.1 and trunk.
 With the patch, I can see the proper error when executing my bad command:
 {code}
 Unable to create stress keyspace: replication_factor is an option for 
 SimpleStrategy, not NetworkTopologyStrategy
 {code}
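
 A minimal sketch of the unwrapping idea (a hypothetical helper, not the 
 attached patch): the real error is the InvocationTargetException's cause.
 {code}
 import java.lang.reflect.Constructor;
 import java.lang.reflect.InvocationTargetException;

 // Sketch: construct the replication strategy reflectively and surface the
 // strategy's own validation error instead of a generic wrapper message.
 public final class StrategyConstruction
 {
     static Object construct(Constructor<?> ctor, Object... args) throws Exception
     {
         try
         {
             return ctor.newInstance(args);
         }
         catch (InvocationTargetException e)
         {
             // e.getCause() carries e.g. "replication_factor is an option
             // for SimpleStrategy, not NetworkTopologyStrategy"
             throw new RuntimeException(e.getCause().getMessage(), e.getCause());
         }
     }
 }
 {code}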



--
This message was sent by Atlassian JIRA
(v6.3.4#6332)


[jira] [Commented] (CASSANDRA-7816) Duplicate DOWN/UP Events Pushed with Native Protocol

2015-03-12 Thread Brandon Williams (JIRA)

[ 
https://issues.apache.org/jira/browse/CASSANDRA-7816?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=14359042#comment-14359042
 ] 

Brandon Williams commented on CASSANDRA-7816:
-

With this patch applied, when I ctrl-c out of foreground mode to stop the 
server I just get this message, repeating infinitely:

{noformat}
 WARN 17:37:59,733 Transport error occurred during acceptance of message.
org.apache.thrift.transport.TTransportException: No underlying server socket.
at 
org.apache.cassandra.thrift.TCustomServerSocket.acceptImpl(TCustomServerSocket.java:96)
at 
org.apache.cassandra.thrift.TCustomServerSocket.acceptImpl(TCustomServerSocket.java:36)
at 
org.apache.thrift.transport.TServerTransport.accept(TServerTransport.java:31)
at 
org.apache.cassandra.thrift.CustomTThreadPoolServer.serve(CustomTThreadPoolServer.java:109)
at 
org.apache.cassandra.thrift.ThriftServer$ThriftServerThread.run(ThriftServer.java:136)
{noformat}
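
Not the actual CustomTThreadPoolServer code, but a sketch of the stop-flag 
guard that avoids spinning on accept() once the server socket is gone:
{code}
import org.apache.thrift.transport.TServerTransport;
import org.apache.thrift.transport.TTransport;
import org.apache.thrift.transport.TTransportException;

// Illustrative accept loop: exit when a shutdown is in progress instead of
// logging "No underlying server socket" forever.
final class AcceptLoopSketch
{
    private volatile boolean stopped;
    private final TServerTransport serverTransport;

    AcceptLoopSketch(TServerTransport serverTransport)
    {
        this.serverTransport = serverTransport;
    }

    void serve()
    {
        while (!stopped)
        {
            try
            {
                TTransport client = serverTransport.accept();
                handle(client); // hand off to a worker pool (omitted)
            }
            catch (TTransportException e)
            {
                if (stopped)
                    break; // transport was closed by stop(): exit quietly
                System.err.println("Transport error during accept: " + e);
            }
        }
    }

    void stop()
    {
        stopped = true;
        serverTransport.close(); // unblocks the pending accept()
    }

    private void handle(TTransport client) { client.close(); }
}
{code}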

 Duplicate DOWN/UP Events Pushed with Native Protocol
 

 Key: CASSANDRA-7816
 URL: https://issues.apache.org/jira/browse/CASSANDRA-7816
 Project: Cassandra
  Issue Type: Bug
  Components: API
Reporter: Michael Penick
Assignee: Stefania
Priority: Minor
 Fix For: 2.0.13, 2.1.4

 Attachments: 7816-v2.0.txt, tcpdump_repeating_status_change.txt, 
 trunk-7816.txt


 Added MOVED_NODE as a possible type of topology change and also specified 
 that it is possible to receive the same event multiple times.



--
This message was sent by Atlassian JIRA
(v6.3.4#6332)


[jira] [Created] (CASSANDRA-8962) Fix IndexSummaryManagerTest and SSTableRewriterTest on Windows

2015-03-12 Thread Joshua McKenzie (JIRA)
Joshua McKenzie created CASSANDRA-8962:
--

 Summary: Fix IndexSummaryManagerTest and SSTableRewriterTest on 
Windows
 Key: CASSANDRA-8962
 URL: https://issues.apache.org/jira/browse/CASSANDRA-8962
 Project: Cassandra
  Issue Type: Bug
Reporter: Joshua McKenzie
Assignee: Joshua McKenzie
Priority: Trivial
 Fix For: 3.0


Platform specific failures:
org.apache.cassandra.io.sstable.IndexSummaryManagerTest.testCompactionRace
org.apache.cassandra.io.sstable.SSTableRewriterTest.testNumberOfFiles_truncate
org.apache.cassandra.io.sstable.SSTableRewriterTest.testSmallFiles
org.apache.cassandra.io.sstable.SSTableRewriterTest.testNumberOfFiles_dont_clean_readers
org.apache.cassandra.io.sstable.SSTableRewriterTest.testNumberOfFiles_finish_empty_new_writer




--
This message was sent by Atlassian JIRA
(v6.3.4#6332)


[jira] [Updated] (CASSANDRA-8770) Support reversing map and set collation order

2015-03-12 Thread Aleksey Yeschenko (JIRA)

 [ 
https://issues.apache.org/jira/browse/CASSANDRA-8770?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Aleksey Yeschenko updated CASSANDRA-8770:
-
Assignee: (was: Benjamin Lerer)

 Support reversing map and set collation order
 -

 Key: CASSANDRA-8770
 URL: https://issues.apache.org/jira/browse/CASSANDRA-8770
 Project: Cassandra
  Issue Type: New Feature
  Components: API
Reporter: Jonathan Ellis
Priority: Minor
 Fix For: 3.0






--
This message was sent by Atlassian JIRA
(v6.3.4#6332)


[jira] [Updated] (CASSANDRA-6091) Better Vnode support in hadoop/pig

2015-03-12 Thread mck (JIRA)

 [ 
https://issues.apache.org/jira/browse/CASSANDRA-6091?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

mck updated CASSANDRA-6091:
---
Attachment: (was: cassandra-2.0-6091.txt)

 Better Vnode support in hadoop/pig
 --

 Key: CASSANDRA-6091
 URL: https://issues.apache.org/jira/browse/CASSANDRA-6091
 Project: Cassandra
  Issue Type: Bug
  Components: Hadoop
Reporter: Alex Liu
Assignee: mck

 CASSANDRA-6084 shows there are some issues when running hadoop/pig jobs if 
 vnodes are enabled. The hadoop performance of vnode-enabled nodes is also 
 bad, because there are so many splits.
 The idea is to combine vnode splits into big pseudo splits so that hadoop/pig 
 jobs work as if vnodes were disabled.
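
 One way to picture the combining (a hypothetical sketch, not the attached 
 patches): greedily merge consecutive token ranges hosted by the same endpoint 
 until a target split size is reached, so a job sees a handful of big pseudo 
 splits instead of hundreds of vnode splits.
 {code}
 import java.util.ArrayList;
 import java.util.List;

 // Hypothetical sketch of combining vnode subsplits into bigger pseudo splits.
 public class SplitCombiner
 {
     static class Subsplit
     {
         final String endpoint; // replica that owns this token range
         final long size;       // estimated size of the range
         Subsplit(String endpoint, long size) { this.endpoint = endpoint; this.size = size; }
     }

     // Merge consecutive subsplits on the same endpoint until targetSize is hit.
     public static List<List<Subsplit>> combine(List<Subsplit> in, long targetSize)
     {
         List<List<Subsplit>> out = new ArrayList<List<Subsplit>>();
         List<Subsplit> current = new ArrayList<Subsplit>();
         long size = 0;
         for (Subsplit s : in)
         {
             boolean sameHost = current.isEmpty() || current.get(0).endpoint.equals(s.endpoint);
             if (!sameHost || size >= targetSize)
             {
                 out.add(current);              // close the current pseudo split
                 current = new ArrayList<Subsplit>();
                 size = 0;
             }
             current.add(s);
             size += s.size;
         }
         if (!current.isEmpty())
             out.add(current);
         return out;
     }
 }
 {code}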



--
This message was sent by Atlassian JIRA
(v6.3.4#6332)


[jira] [Commented] (CASSANDRA-8838) Resumable bootstrap streaming

2015-03-12 Thread Yuki Morishita (JIRA)

[ 
https://issues.apache.org/jira/browse/CASSANDRA-8838?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=14359347#comment-14359347
 ] 

Yuki Morishita commented on CASSANDRA-8838:
---

[~Stefania] I pushed a rebased version with an option to reset available ranges: 
https://github.com/yukim/cassandra/commits/8838-2

Users can pass {{-Dcassandra.reset_bootstrap_progress=true}} to reset the 
available ranges when booting the node.

 Resumable bootstrap streaming
 -

 Key: CASSANDRA-8838
 URL: https://issues.apache.org/jira/browse/CASSANDRA-8838
 Project: Cassandra
  Issue Type: Sub-task
Reporter: Yuki Morishita
Assignee: Yuki Morishita
Priority: Minor
  Labels: dense-storage
 Fix For: 3.0


 This allows the bootstrapping node to avoid re-streaming data it has already 
 received.
 The bootstrapping node records the received keyspace/ranges as each stream 
 session completes. When some sessions with other nodes fail, bootstrapping 
 fails as well, but the next time the node re-bootstraps, the already received 
 keyspace/ranges are skipped rather than streamed again.
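
 A rough sketch of the bookkeeping this implies (illustrative only; see the 
 branch above for the real implementation):
 {code}
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Map;
 import java.util.Set;

 // Illustrative: record completed keyspace/ranges per stream session, skip
 // them on re-bootstrap, and allow a reset (what
 // -Dcassandra.reset_bootstrap_progress=true effects). Plain Strings stand
 // in here for Cassandra's Range<Token>.
 public class AvailableRanges
 {
     private final Map<String, Set<String>> available = new HashMap<String, Set<String>>();

     public void onSessionComplete(String keyspace, Set<String> ranges)
     {
         Set<String> done = available.get(keyspace);
         if (done == null)
             available.put(keyspace, done = new HashSet<String>());
         done.addAll(ranges); // Cassandra would persist this, e.g. in a system table
     }

     public Set<String> toStream(String keyspace, Set<String> wanted)
     {
         Set<String> remaining = new HashSet<String>(wanted);
         Set<String> done = available.get(keyspace);
         if (done != null)
             remaining.removeAll(done); // already received: skip streaming these
         return remaining;
     }

     public void reset()
     {
         available.clear();
     }
 }
 {code}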



--
This message was sent by Atlassian JIRA
(v6.3.4#6332)


[jira] [Comment Edited] (CASSANDRA-8238) NPE in SizeTieredCompactionStrategy.filterColdSSTables

2015-03-12 Thread Fredrik LS (JIRA)

[ 
https://issues.apache.org/jira/browse/CASSANDRA-8238?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=14359370#comment-14359370
 ] 

Fredrik LS edited comment on CASSANDRA-8238 at 3/12/15 8:47 PM:


Reproduced this in 2.1.3.
I'm not very familiar with the Cassandra code base, but I took the liberty of 
doing some debugging.
This occurs when doing bulkloading from JMX.
The problem is that {{SSTableLoader}} has a static initializer setting
{code}
static
{
    Config.setClientMode(true);
}
{code}
(I guess going through JMX shouldn't be considered client mode; only running 
SSTableLoader standalone should be.)
Every SSTableReader created after the {{clientMode}} flag is set will have its 
{{readMeter}} set to null, per the SSTableReader constructor. The 
SSTableReaders for SSTables existing at startup will have a readMeter set to 
some value, but when JMX bulkloading is used there will be a mix of 
SSTableReaders for the same CF, some with a readMeter value and some with a 
null readMeter. That, in combination with hot and cold SSTables in 
{{SizeTieredCompactionStrategy.filterColdSSTables(...)}}, triggers the 
{{NullPointerException}} when the CompactionExecutor kicks in and tries to 
compact the hot SSTables that already existed at startup (readMeter set) 
together with the just-streamed cold SSTables from JMX bulkloading (readMeter 
null).

Hope my analysis is correct and that the code formatting isn't too bad.

Regards
/Fredrik
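
A minimal null-guard sketch of the failure mode described above (illustrative 
only; the attached patch instead asserts that the meter is non-null):
{code}
import org.apache.cassandra.io.sstable.SSTableReader;

// Sketch: treat a missing readMeter as "zero reads per second" instead of
// dereferencing null inside filterColdSSTables.
public final class ReadRates
{
    public static double readsPerSecond(SSTableReader sstable)
    {
        // readMeter is null for readers created while Config.isClientMode()
        // is set, e.g. SSTables streamed in via JMX bulkloading
        return sstable.readMeter == null ? 0.0 : sstable.readMeter.fifteenMinuteRate();
    }
}
{code}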


was (Author: fredrikl74):
Reproduced this in 2.1.3.
I'm not very familiar with the Cassandra code base, but I took the liberty of 
doing some debugging.
This occurs when doing bulkloading from JMX.
The problem is that {{SSTableLoader}} has a static initializer setting
{code}
static
{
    Config.setClientMode(true);
}
{code}
(I guess going through JMX shouldn't be considered client mode; only running 
SSTableLoader standalone should be.)
Every SSTableReader created after the {{clientMode}} flag is set will have its 
{{readMeter}} set to null, per the SSTableReader constructor. The 
SSTableReaders for SSTables existing at startup will have a readMeter set to 
some value, but when JMX bulkloading is used there will be a mix of 
SSTableReaders for the same CF, some with a readMeter value and some with a 
null readMeter. That, in combination with hot and cold SSTables in 
{{SizeTieredCompactionStrategy.filterColdSSTables(...)}}, triggers the 
{{NullPointerException}} when the CompactionExecutor kicks in and tries to 
compact the hot SSTables that already existed at startup (readMeter set) 
together with the just-streamed cold SSTables from JMX bulkloading (readMeter 
null).

Hope my analysis is correct and that the code formatting isn't too bad.

Regards
/Fredrik

 NPE in SizeTieredCompactionStrategy.filterColdSSTables
 --

 Key: CASSANDRA-8238
 URL: https://issues.apache.org/jira/browse/CASSANDRA-8238
 Project: Cassandra
  Issue Type: Bug
  Components: Core
Reporter: Tyler Hobbs
Assignee: Marcus Eriksson
 Fix For: 2.1.3

 Attachments: 0001-assert-that-readMeter-is-not-null.patch


 {noformat}
 ERROR [CompactionExecutor:15] 2014-10-31 15:28:32,318 
 CassandraDaemon.java:153 - Exception in thread 
 Thread[CompactionExecutor:15,1,main]
 java.lang.NullPointerException: null
 at 
 org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy.filterColdSSTables(SizeTieredCompactionStrategy.java:181)
  ~[apache-cassandra-2.1.1.jar:2.1.1]
 at 
 org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy.getNextBackgroundSSTables(SizeTieredCompactionStrategy.java:83)
  ~[apache-cassandra-2.1.1.jar:2.1.1]
 at 
 org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy.getNextBackgroundTask(SizeTieredCompactionStrategy.java:267)
  ~[apache-cassandra-2.1.1.jar:2.1.1]
 at 
 org.apache.cassandra.db.compaction.CompactionManager$BackgroundCompactionTask.run(CompactionManager.java:226)
  ~[apache-cassandra-2.1.1.jar:2.1.1]
 at 
 java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:471) 
 ~[na:1.7.0_72]
 at java.util.concurrent.FutureTask.run(FutureTask.java:262) ~[na:1.7.0_72]
 at 
 java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
  ~[na:1.7.0_72]
 at 
 java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
  [na:1.7.0_72]
 at java.lang.Thread.run(Thread.java:745) [na:1.7.0_72]
 {noformat}



--
This message was sent by Atlassian JIRA
(v6.3.4#6332)


[jira] [Comment Edited] (CASSANDRA-8238) NPE in SizeTieredCompactionStrategy.filterColdSSTables

2015-03-12 Thread Fredrik LS (JIRA)

[ 
https://issues.apache.org/jira/browse/CASSANDRA-8238?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=14359370#comment-14359370
 ] 

Fredrik LS edited comment on CASSANDRA-8238 at 3/12/15 8:47 PM:


Reproduced this in 2.1.3.
I'm not very familiar with the Cassandra code base, but I took the liberty of 
doing some debugging.
This occurs when doing bulkloading from JMX.
The problem is that {{SSTableLoader}} has a static initializer setting
{code}
static
{
    Config.setClientMode(true);
}
{code}
(I guess going through JMX shouldn't be considered client mode; only running 
SSTableLoader standalone should be.)
Every SSTableReader created after the {{clientMode}} flag is set will have its 
{{readMeter}} set to null, per the SSTableReader constructor. The 
SSTableReaders for SSTables existing at startup will have a readMeter set to 
some value, but when JMX bulkloading is used there will be a mix of 
SSTableReaders for the same CF, some with a readMeter value and some with a 
null readMeter. That, in combination with hot and cold SSTables in 
{{SizeTieredCompactionStrategy.filterColdSSTables(...)}}, triggers the 
{{NullPointerException}} when the CompactionExecutor kicks in and tries to 
compact the hot SSTables that already existed at startup (readMeter set) 
together with the just-streamed cold SSTables from JMX bulkloading (readMeter 
null).

Hope my analysis is correct and that the code formatting isn't too bad.

Regards
/Fredrik


was (Author: fredrikl74):
Reproduced this in 2.1.3.
I'm not very familiar with the Cassandra code base, but I took the liberty of 
doing some debugging.
This occurs when doing bulkloading from JMX.
The problem is that {{SSTableLoader}} has a static initializer setting
{code}
static
{
    Config.setClientMode(true);
}
{code}
(I guess going through JMX shouldn't be considered client mode; only running 
SSTableLoader standalone should be.)
Every SSTableReader created after the {{clientMode}} flag is set will have its 
{{readMeter}} set to null, per the SSTableReader constructor. The 
SSTableReaders for SSTables existing at startup will have a readMeter set to 
some value, but when JMX bulkloading is used there will be a mix of 
SSTableReaders for the same CF, some with a readMeter value and some with a 
null readMeter. That, in combination with hot and cold SSTables in 
{{SizeTieredCompactionStrategy.filterColdSSTables(...)}}, triggers the 
{{NullPointerException}} when the CompactionExecutor kicks in and tries to 
compact the hot SSTables that already existed at startup (readMeter set) 
together with the just-streamed cold SSTables from JMX bulkloading (readMeter 
null).

Hope my analysis is correct and that the code formatting isn't too bad.

Regards
/Fredrik

 NPE in SizeTieredCompactionStrategy.filterColdSSTables
 --

 Key: CASSANDRA-8238
 URL: https://issues.apache.org/jira/browse/CASSANDRA-8238
 Project: Cassandra
  Issue Type: Bug
  Components: Core
Reporter: Tyler Hobbs
Assignee: Marcus Eriksson
 Fix For: 2.1.3

 Attachments: 0001-assert-that-readMeter-is-not-null.patch


 {noformat}
 ERROR [CompactionExecutor:15] 2014-10-31 15:28:32,318 
 CassandraDaemon.java:153 - Exception in thread 
 Thread[CompactionExecutor:15,1,main]
 java.lang.NullPointerException: null
 at 
 org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy.filterColdSSTables(SizeTieredCompactionStrategy.java:181)
  ~[apache-cassandra-2.1.1.jar:2.1.1]
 at 
 org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy.getNextBackgroundSSTables(SizeTieredCompactionStrategy.java:83)
  ~[apache-cassandra-2.1.1.jar:2.1.1]
 at 
 org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy.getNextBackgroundTask(SizeTieredCompactionStrategy.java:267)
  ~[apache-cassandra-2.1.1.jar:2.1.1]
 at 
 org.apache.cassandra.db.compaction.CompactionManager$BackgroundCompactionTask.run(CompactionManager.java:226)
  ~[apache-cassandra-2.1.1.jar:2.1.1]
 at 
 java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:471) 
 ~[na:1.7.0_72]
 at java.util.concurrent.FutureTask.run(FutureTask.java:262) ~[na:1.7.0_72]
 at 
 java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
  ~[na:1.7.0_72]
 at 
 java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
  [na:1.7.0_72]
 at java.lang.Thread.run(Thread.java:745) [na:1.7.0_72]
 {noformat}



--
This message was sent by Atlassian JIRA
(v6.3.4#6332)


[jira] [Resolved] (CASSANDRA-8961) Data rewrite case causes almost non-functional compaction

2015-03-12 Thread Carl Yeksigian (JIRA)

 [ 
https://issues.apache.org/jira/browse/CASSANDRA-8961?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Carl Yeksigian resolved CASSANDRA-8961.
---
Resolution: Duplicate

This code generates a range tombstone for each delete; CASSANDRA-8547 shows the 
piece of code which is causing the performance problems. Because it scans 
all of the RangeTombstones for each row during compaction, it scans through 
the list of 200k rows 400k times.

After applying the patch in 8547, compaction did finish.

 Data rewrite case causes almost non-functional compaction
 -

 Key: CASSANDRA-8961
 URL: https://issues.apache.org/jira/browse/CASSANDRA-8961
 Project: Cassandra
  Issue Type: Bug
 Environment: Centos 6.6, Cassandra 2.0.12 (Also seen in Cassandra 2.1)
Reporter: Dan Kinder
Priority: Minor

 There seems to be a bug of some kind where compaction grinds to a halt in 
 this use case: from time to time we have a set of rows we need to migrate, 
 changing their primary key by deleting the row and inserting a new row with 
 the same partition key and a different clustering key. The python script 
 below demonstrates this; it takes a bit of time to run (I didn't try to 
 optimize it), but when it's done it will be trying to compact a few hundred 
 megs of data for a long time... on the order of days, or it will never 
 finish.
 Not verified by this sandboxed experiment, but it seems that compression 
 settings do not matter and that this happens with STCS as well, not just 
 LCS. I am still testing whether other patterns cause this terrible 
 compaction performance, like deleting all rows then inserting, or vice versa.
 Even if it isn't a bug per se, is there a way to fix or work around this 
 behavior?
 {code}
 import string
 import random
 from cassandra.cluster import Cluster

 cluster = Cluster(['localhost'])
 db = cluster.connect('walker')

 db.execute("DROP KEYSPACE IF EXISTS trial")
 db.execute("""CREATE KEYSPACE trial
               WITH REPLICATION = { 'class': 'SimpleStrategy',
                                    'replication_factor': 1 }""")
 db.execute("""CREATE TABLE trial.tbl (
                   pk text,
                   data text,
                   PRIMARY KEY(pk, data)
               ) WITH compaction = { 'class' : 'LeveledCompactionStrategy' }
                 AND compression = {'sstable_compression': ''}""")

 # Number of rows to insert and move
 n = 200000

 # Insert n rows with the same partition key, 1KB of unique data in cluster key
 for i in range(n):
     db.execute("INSERT INTO trial.tbl (pk, data) VALUES ('thepk', %s)",
                [str(i).zfill(1024)])

 # Update those n rows, deleting each and replacing with a very similar row
 for i in range(n):
     val = str(i).zfill(1024)
     db.execute("DELETE FROM trial.tbl WHERE pk = 'thepk' AND data = %s",
                [val])
     db.execute("INSERT INTO trial.tbl (pk, data) VALUES ('thepk', %s)",
                ["1" + val])
 {code}



--
This message was sent by Atlassian JIRA
(v6.3.4#6332)


[jira] [Comment Edited] (CASSANDRA-8238) NPE in SizeTieredCompactionStrategy.filterColdSSTables

2015-03-12 Thread Fredrik LS (JIRA)

[ 
https://issues.apache.org/jira/browse/CASSANDRA-8238?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=14359370#comment-14359370
 ] 

Fredrik LS edited comment on CASSANDRA-8238 at 3/12/15 8:48 PM:


Reproduced this in 2.1.3.
I'm not very familiar with the Cassandra code base, but I took the liberty of 
doing some debugging.
This occurs when doing bulkloading from JMX.
The problem is that {{SSTableLoader}} has a static initializer setting
{code}
static
{
    Config.setClientMode(true);
}
{code}
(I guess going through JMX shouldn't be considered client mode; only running 
SSTableLoader standalone should be.)
Every SSTableReader created after the {{clientMode}} flag is set will have its 
{{readMeter}} set to null, per the SSTableReader constructor. The 
SSTableReaders for SSTables existing at startup will have a readMeter set to 
some value, but when JMX bulkloading is used there will be a mix of 
SSTableReaders for the same CF, some with a readMeter value and some with a 
null readMeter. That, in combination with hot and cold SSTables in 
{{SizeTieredCompactionStrategy.filterColdSSTables(...)}}, triggers the 
{{NullPointerException}} when the CompactionExecutor kicks in and tries to 
compact the hot SSTables that already existed at startup (readMeter set) 
together with the just-streamed cold SSTables from JMX bulkloading (readMeter 
null).

Hope my analysis is correct and that the code formatting isn't too bad.

Regards
/Fredrik


was (Author: fredrikl74):
Reproduced this in 2.1.3.
I'm not very familiar with the Cassandra code base, but I took the liberty of 
doing some debugging.
This occurs when doing bulkloading from JMX.
The problem is that {{SSTableLoader}} has a static initializer setting
{code}
static
{
    Config.setClientMode(true);
}
{code}
(I guess going through JMX shouldn't be considered client mode; only running 
SSTableLoader standalone should be.)
Every SSTableReader created after the {{clientMode}} flag is set will have its 
{{readMeter}} set to null, per the SSTableReader constructor. The 
SSTableReaders for SSTables existing at startup will have a readMeter set to 
some value, but when JMX bulkloading is used there will be a mix of 
SSTableReaders for the same CF, some with a readMeter value and some with a 
null readMeter. That, in combination with hot and cold SSTables in 
{{SizeTieredCompactionStrategy.filterColdSSTables(...)}}, triggers the 
{{NullPointerException}} when the CompactionExecutor kicks in and tries to 
compact the hot SSTables that already existed at startup (readMeter set) 
together with the just-streamed cold SSTables from JMX bulkloading (readMeter 
null).

Hope my analysis is correct and that the code formatting isn't too bad.

Regards
/Fredrik

 NPE in SizeTieredCompactionStrategy.filterColdSSTables
 --

 Key: CASSANDRA-8238
 URL: https://issues.apache.org/jira/browse/CASSANDRA-8238
 Project: Cassandra
  Issue Type: Bug
  Components: Core
Reporter: Tyler Hobbs
Assignee: Marcus Eriksson
 Fix For: 2.1.3

 Attachments: 0001-assert-that-readMeter-is-not-null.patch


 {noformat}
 ERROR [CompactionExecutor:15] 2014-10-31 15:28:32,318 
 CassandraDaemon.java:153 - Exception in thread 
 Thread[CompactionExecutor:15,1,main]
 java.lang.NullPointerException: null
 at 
 org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy.filterColdSSTables(SizeTieredCompactionStrategy.java:181)
  ~[apache-cassandra-2.1.1.jar:2.1.1]
 at 
 org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy.getNextBackgroundSSTables(SizeTieredCompactionStrategy.java:83)
  ~[apache-cassandra-2.1.1.jar:2.1.1]
 at 
 org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy.getNextBackgroundTask(SizeTieredCompactionStrategy.java:267)
  ~[apache-cassandra-2.1.1.jar:2.1.1]
 at 
 org.apache.cassandra.db.compaction.CompactionManager$BackgroundCompactionTask.run(CompactionManager.java:226)
  ~[apache-cassandra-2.1.1.jar:2.1.1]
 at 
 java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:471) 
 ~[na:1.7.0_72]
 at java.util.concurrent.FutureTask.run(FutureTask.java:262) ~[na:1.7.0_72]
 at 
 java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
  ~[na:1.7.0_72]
 at 
 java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
  [na:1.7.0_72]
 at java.lang.Thread.run(Thread.java:745) [na:1.7.0_72]
 {noformat}



--
This message was sent by Atlassian JIRA
(v6.3.4#6332)


[jira] [Commented] (CASSANDRA-8942) Keep node up even when bootstrap is failed (and provide tool to resume bootstrap)

2015-03-12 Thread Yuki Morishita (JIRA)

[ 
https://issues.apache.org/jira/browse/CASSANDRA-8942?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=14359433#comment-14359433
 ] 

Yuki Morishita commented on CASSANDRA-8942:
---

Pushed patch based on CASSANDRA-8838: 
https://github.com/yukim/cassandra/commits/8942

This will keep the node up after a bootstrap failure, and the user can run 
{{nodetool bootstrap resume}} to resume bootstrapping.

I also created dtest here: 
https://github.com/yukim/cassandra-dtest/tree/CASSANDRA-8942

 Keep node up even when bootstrap is failed (and provide tool to resume 
 bootstrap)
 -

 Key: CASSANDRA-8942
 URL: https://issues.apache.org/jira/browse/CASSANDRA-8942
 Project: Cassandra
  Issue Type: Sub-task
Reporter: Yuki Morishita
Assignee: Yuki Morishita
Priority: Minor
 Fix For: 3.0


 With CASSANDRA-8838, we can keep a bootstrapping node up when some streaming 
 fails, provided we supply a tool to resume the failed bootstrap streaming.
 A failed bootstrap node enters a mode similar to 'write survey mode': other 
 nodes in the cluster still view it as bootstrapping, but they send writes to 
 the bootstrapping node as well.
 By providing a new nodetool command that resumes bootstrap from the saved 
 bootstrap state, we can continue bootstrapping after resolving the issue 
 that caused the previous failure.
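
 Combined with the reset option from CASSANDRA-8838, the operator workflow 
 would look roughly like this:
 {noformat}
 # bootstrap fails mid-stream; the node stays up in a write-survey-like mode.
 # fix the underlying problem, then resume from the saved state:
 nodetool bootstrap resume

 # or discard the saved progress and re-stream everything on restart:
 bin/cassandra -Dcassandra.reset_bootstrap_progress=true
 {noformat}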



--
This message was sent by Atlassian JIRA
(v6.3.4#6332)


[jira] [Commented] (CASSANDRA-8449) Allow zero-copy reads again

2015-03-12 Thread Benedict (JIRA)

[ 
https://issues.apache.org/jira/browse/CASSANDRA-8449?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=14359434#comment-14359434
 ] 

Benedict commented on CASSANDRA-8449:
-

bq. -carrot- casus belli

FTFY :)

 Allow zero-copy reads again
 ---

 Key: CASSANDRA-8449
 URL: https://issues.apache.org/jira/browse/CASSANDRA-8449
 Project: Cassandra
  Issue Type: Improvement
Reporter: T Jake Luciani
Assignee: T Jake Luciani
Priority: Minor
  Labels: performance
 Fix For: 3.0


 We disabled zero-copy reads in CASSANDRA-3179 due to in-flight reads 
 accessing a ByteBuffer after the data was unmapped by compaction. Currently 
 this code path is only used for uncompressed reads.
 The actual bytes are in fact copied to the client output buffers for both 
 netty and thrift before being sent over the wire, so the only real issue is 
 the time it takes to process the read internally.
 This patch adds a slow network read test and changes the tidy() method to 
 actually delete an sstable only once the readTimeout has elapsed, giving 
 plenty of time to serialize the read.
 Removing this copy causes significantly less GC on the read path and improves 
 the tail latencies:
 http://cstar.datastax.com/graph?stats=c0c8ce16-7fea-11e4-959d-42010af0688f&metric=gc_count&operation=2_read&smoothing=1&show_aggregates=true&xmin=0&xmax=109.34&ymin=0&ymax=5.5
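
 A minimal sketch of the tidy-delay idea (hypothetical shape, assuming a 
 scheduled executor; the actual patch lives on the ticket):
 {code}
 import java.io.File;
 import java.util.concurrent.Executors;
 import java.util.concurrent.ScheduledExecutorService;
 import java.util.concurrent.TimeUnit;

 // Illustrative: defer deleting an sstable's files until any in-flight read
 // that might still touch the mmapped region must have timed out.
 public class DeferredTidy
 {
     private static final ScheduledExecutorService DELETER =
             Executors.newSingleThreadScheduledExecutor();

     public static void tidy(final File dataFile, long readTimeoutMillis)
     {
         DELETER.schedule(new Runnable()
         {
             public void run()
             {
                 // every read started before the release has now either
                 // finished serializing or hit the read timeout
                 dataFile.delete();
             }
         }, readTimeoutMillis, TimeUnit.MILLISECONDS);
     }
 }
 {code}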



--
This message was sent by Atlassian JIRA
(v6.3.4#6332)


[jira] [Comment Edited] (CASSANDRA-8238) NPE in SizeTieredCompactionStrategy.filterColdSSTables

2015-03-12 Thread Fredrik LS (JIRA)

[ 
https://issues.apache.org/jira/browse/CASSANDRA-8238?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=14359370#comment-14359370
 ] 

Fredrik LS edited comment on CASSANDRA-8238 at 3/12/15 8:47 PM:


Reproduced this in 2.1.3.
I'm not very familiar with the Cassandra code base, but I took the liberty of 
doing some debugging.
This occurs when doing bulkloading from JMX.
The problem is that {{SSTableLoader}} has a static initializer setting
{code}
static
{
    Config.setClientMode(true);
}
{code}
(I guess going through JMX shouldn't be considered client mode; only running 
SSTableLoader standalone should be.)
Every SSTableReader created after the {{clientMode}} flag is set will have its 
{{readMeter}} set to null, per the SSTableReader constructor. The 
SSTableReaders for SSTables existing at startup will have a readMeter set to 
some value, but when JMX bulkloading is used there will be a mix of 
SSTableReaders for the same CF, some with a readMeter value and some with a 
null readMeter. That, in combination with hot and cold SSTables in 
{{SizeTieredCompactionStrategy.filterColdSSTables(...)}}, triggers the 
{{NullPointerException}} when the CompactionExecutor kicks in and tries to 
compact the hot SSTables that already existed at startup (readMeter set) 
together with the just-streamed cold SSTables from JMX bulkloading (readMeter 
null).

Hope my analysis is correct and that the code formatting isn't too bad.

Regards
/Fredrik


was (Author: fredrikl74):
Reproduced this in 2.1.3.
I'm not very familiar with the Cassandra code base, but I took the liberty of 
doing some debugging.
This occurs when doing bulkloading from JMX.
The problem is that {{SSTableLoader}} has a static initializer setting
{code}
static
{
    Config.setClientMode(true);
}
{code}
(I guess going through JMX shouldn't be considered client mode; only running 
SSTableLoader standalone should be.)
Every SSTableReader created after the {{clientMode}} flag is set will have its 
{{readMeter}} set to null, per the SSTableReader constructor. The 
SSTableReaders for SSTables existing at startup will have a readMeter set to 
some value, but when JMX bulkloading is used there will be a mix of 
SSTableReaders for the same CF, some with a readMeter value and some with a 
null readMeter. That, in combination with hot and cold SSTables in 
{{SizeTieredCompactionStrategy.filterColdSSTables(...)}}, triggers the 
{{NullPointerException}} when the CompactionExecutor kicks in and tries to 
compact the hot SSTables that already existed at startup (readMeter set) 
together with the just-streamed cold SSTables from JMX bulkloading (readMeter 
null).

Hope my analysis is correct and that the code formatting isn't too bad.

Regards
/Fredrik

 NPE in SizeTieredCompactionStrategy.filterColdSSTables
 --

 Key: CASSANDRA-8238
 URL: https://issues.apache.org/jira/browse/CASSANDRA-8238
 Project: Cassandra
  Issue Type: Bug
  Components: Core
Reporter: Tyler Hobbs
Assignee: Marcus Eriksson
 Fix For: 2.1.3

 Attachments: 0001-assert-that-readMeter-is-not-null.patch


 {noformat}
 ERROR [CompactionExecutor:15] 2014-10-31 15:28:32,318 
 CassandraDaemon.java:153 - Exception in thread 
 Thread[CompactionExecutor:15,1,main]
 java.lang.NullPointerException: null
 at 
 org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy.filterColdSSTables(SizeTieredCompactionStrategy.java:181)
  ~[apache-cassandra-2.1.1.jar:2.1.1]
 at 
 org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy.getNextBackgroundSSTables(SizeTieredCompactionStrategy.java:83)
  ~[apache-cassandra-2.1.1.jar:2.1.1]
 at 
 org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy.getNextBackgroundTask(SizeTieredCompactionStrategy.java:267)
  ~[apache-cassandra-2.1.1.jar:2.1.1]
 at 
 org.apache.cassandra.db.compaction.CompactionManager$BackgroundCompactionTask.run(CompactionManager.java:226)
  ~[apache-cassandra-2.1.1.jar:2.1.1]
 at 
 java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:471) 
 ~[na:1.7.0_72]
 at java.util.concurrent.FutureTask.run(FutureTask.java:262) ~[na:1.7.0_72]
 at 
 java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
  ~[na:1.7.0_72]
 at 
 java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
  [na:1.7.0_72]
 at java.lang.Thread.run(Thread.java:745) [na:1.7.0_72]
 {noformat}



--
This message was sent by Atlassian JIRA
(v6.3.4#6332)


[jira] [Commented] (CASSANDRA-8238) NPE in SizeTieredCompactionStrategy.filterColdSSTables

2015-03-12 Thread Fredrik LS (JIRA)

[ 
https://issues.apache.org/jira/browse/CASSANDRA-8238?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=14359370#comment-14359370
 ] 

Fredrik LS commented on CASSANDRA-8238:
---

Reproduced this in 2.1.3.
I'm not very familiar with the Cassandra code base, but I took the liberty of 
doing some debugging.
This occurs when doing bulkloading from JMX.
The problem is that {{SSTableLoader}} has a static initializer setting
{code}
static
{
    Config.setClientMode(true);
}
{code}
(I guess going through JMX shouldn't be considered client mode; only running 
SSTableLoader standalone should be.)
Every SSTableReader created after the {{clientMode}} flag is set will have its 
{{readMeter}} set to null, per the SSTableReader constructor. The 
SSTableReaders for SSTables existing at startup will have a readMeter set to 
some value, but when JMX bulkloading is used there will be a mix of 
SSTableReaders for the same CF, some with a readMeter value and some with a 
null readMeter. That, in combination with hot and cold SSTables in 
{{SizeTieredCompactionStrategy.filterColdSSTables(...)}}, triggers the 
{{NullPointerException}} when the CompactionExecutor kicks in and tries to 
compact the hot SSTables that already existed at startup (readMeter set) 
together with the just-streamed cold SSTables from JMX bulkloading (readMeter 
null).

Hope my analysis is correct and that the code formatting isn't too bad.

Regards
/Fredrik

 NPE in SizeTieredCompactionStrategy.filterColdSSTables
 --

 Key: CASSANDRA-8238
 URL: https://issues.apache.org/jira/browse/CASSANDRA-8238
 Project: Cassandra
  Issue Type: Bug
  Components: Core
Reporter: Tyler Hobbs
Assignee: Marcus Eriksson
 Fix For: 2.1.3

 Attachments: 0001-assert-that-readMeter-is-not-null.patch


 {noformat}
 ERROR [CompactionExecutor:15] 2014-10-31 15:28:32,318 
 CassandraDaemon.java:153 - Exception in thread 
 Thread[CompactionExecutor:15,1,main]
 java.lang.NullPointerException: null
 at 
 org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy.filterColdSSTables(SizeTieredCompactionStrategy.java:181)
  ~[apache-cassandra-2.1.1.jar:2.1.1]
 at 
 org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy.getNextBackgroundSSTables(SizeTieredCompactionStrategy.java:83)
  ~[apache-cassandra-2.1.1.jar:2.1.1]
 at 
 org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy.getNextBackgroundTask(SizeTieredCompactionStrategy.java:267)
  ~[apache-cassandra-2.1.1.jar:2.1.1]
 at 
 org.apache.cassandra.db.compaction.CompactionManager$BackgroundCompactionTask.run(CompactionManager.java:226)
  ~[apache-cassandra-2.1.1.jar:2.1.1]
 at 
 java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:471) 
 ~[na:1.7.0_72]
 at java.util.concurrent.FutureTask.run(FutureTask.java:262) ~[na:1.7.0_72]
 at 
 java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
  ~[na:1.7.0_72]
 at 
 java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
  [na:1.7.0_72]
 at java.lang.Thread.run(Thread.java:745) [na:1.7.0_72]
 {noformat}



--
This message was sent by Atlassian JIRA
(v6.3.4#6332)


[jira] [Commented] (CASSANDRA-8449) Allow zero-copy reads again

2015-03-12 Thread Aleksey Yeschenko (JIRA)

[ 
https://issues.apache.org/jira/browse/CASSANDRA-8449?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=14359425#comment-14359425
 ] 

Aleksey Yeschenko commented on CASSANDRA-8449:
--

bq. I'm thinking for thrift I just revert to non-zero-copy behavior if the 
thrift rpc is running.

Out of context, just commenting on this quote: in 3.0 thrift will be off by 
default, and better performance with thrift disabled would be a good carrot.

 Allow zero-copy reads again
 ---

 Key: CASSANDRA-8449
 URL: https://issues.apache.org/jira/browse/CASSANDRA-8449
 Project: Cassandra
  Issue Type: Improvement
Reporter: T Jake Luciani
Assignee: T Jake Luciani
Priority: Minor
  Labels: performance
 Fix For: 3.0


 We disabled zero-copy reads in CASSANDRA-3179 due to in-flight reads 
 accessing a ByteBuffer after the data was unmapped by compaction. Currently 
 this code path is only used for uncompressed reads.
 The actual bytes are in fact copied to the client output buffers for both 
 netty and thrift before being sent over the wire, so the only real issue is 
 the time it takes to process the read internally.
 This patch adds a slow network read test and changes the tidy() method to 
 actually delete an sstable only once the readTimeout has elapsed, giving 
 plenty of time to serialize the read.
 Removing this copy causes significantly less GC on the read path and improves 
 the tail latencies:
 http://cstar.datastax.com/graph?stats=c0c8ce16-7fea-11e4-959d-42010af0688f&metric=gc_count&operation=2_read&smoothing=1&show_aggregates=true&xmin=0&xmax=109.34&ymin=0&ymax=5.5



--
This message was sent by Atlassian JIRA
(v6.3.4#6332)


[jira] [Updated] (CASSANDRA-7409) Allow multiple overlapping sstables in L1

2015-03-12 Thread Alan Boudreault (JIRA)

 [ 
https://issues.apache.org/jira/browse/CASSANDRA-7409?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Alan Boudreault updated CASSANDRA-7409:
---
Tester: Alan Boudreault

 Allow multiple overlapping sstables in L1
 -

 Key: CASSANDRA-7409
 URL: https://issues.apache.org/jira/browse/CASSANDRA-7409
 Project: Cassandra
  Issue Type: Improvement
Reporter: Carl Yeksigian
Assignee: Carl Yeksigian
  Labels: compaction
 Fix For: 3.0


 Currently, when a normal L0 compaction takes place (not STCS), we take up to 
 MAX_COMPACTING_L0 L0 sstables and all of the overlapping L1 sstables and 
 compact them together. If we didn't have to deal with the overlapping L1 
 tables, we could compact a higher number of L0 sstables together into a set 
 of non-overlapping L1 sstables.
 This could be done by delaying the invariant that L1 has no overlapping 
 sstables. Going from L1 to L2, we would be compacting fewer sstables together 
 which overlap.
 When reading, we will not have the same one sstable per level (except L0) 
 guarantee, but this can be bounded (once we have too many sets of sstables, 
 either compact them back into the same level, or compact them up to the next 
 level).
 This could be generalized to allow any level to be the maximum for this 
 overlapping strategy.



--
This message was sent by Atlassian JIRA
(v6.3.4#6332)


[jira] [Commented] (CASSANDRA-8927) Mark libjna-java + libjna-jni as incompatible in debian package

2015-03-12 Thread Carl Yeksigian (JIRA)

[ 
https://issues.apache.org/jira/browse/CASSANDRA-8927?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=14358810#comment-14358810
 ] 

Carl Yeksigian commented on CASSANDRA-8927:
---

+1. I just tested changing the test runner to use {{-Djna.nosys=true}} and it 
fixed the {{NoClassDefFoundError}} in Wheezy.

 Mark libjna-java + libjna-jni as incompatible in debian package
 ---

 Key: CASSANDRA-8927
 URL: https://issues.apache.org/jira/browse/CASSANDRA-8927
 Project: Cassandra
  Issue Type: Bug
  Components: Packaging
 Environment: Debian
Reporter: Robert Stupp
Assignee: Michael Shuler
 Fix For: 3.0, 2.1.4


 Current Debian (Wheezy) might bring {{libjna-java}} in version 3.2.7-4, which 
 has an incompatible {{libjnidispatch.so}}, because since C* 2.1 we use JNA 
 4.0.0 (the native interface changed).
 jna.jar includes all binaries for all supported platforms - so there's no 
 need for libjna installed separately.
 Since CASSANDRA-8714 has been committed, the incompatibility manifests in 
 {{java.lang.NoClassDefFoundError: Could not initialize class 
 com.sun.jna.Native}} (which is caused by outdated libjna-java installed via 
 apt).
 Note: Debian jessie adds new package {{libjna-jni}} (4.1.0-1) in addition to 
 {{libjna-java}} (4.1.0-1) - both contain the {{libjnidispatch.so}}. Although 
 these seem to work, we might hit the same issue when there's a need to 
 upgrade JNA to 4.2.x sometime.



--
This message was sent by Atlassian JIRA
(v6.3.4#6332)


[jira] [Updated] (CASSANDRA-8670) Large columns + NIO memory pooling causes excessive direct memory usage

2015-03-12 Thread Ariel Weisberg (JIRA)

 [ 
https://issues.apache.org/jira/browse/CASSANDRA-8670?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Ariel Weisberg updated CASSANDRA-8670:
--
Description: 
If you provide a large byte array to NIO and ask it to populate the byte array 
from a socket it will allocate a thread local byte buffer that is the size of 
the requested read no matter how large it is. Old IO wraps new IO for sockets 
(but not files) so old IO is affected as well.

Even if you are using Buffered{Input | Output}Stream you can end up passing a 
large byte array to NIO. The byte array read method will pass the array to NIO 
directly if it is larger than the internal buffer.  

Passing large cells between nodes as part of intra-cluster messaging can cause 
the NIO pooled buffers to quickly reach a high watermark and stay there. This 
ends up costing 2x the largest cell size because there is a buffer for input 
and output since they are different threads. This is further multiplied by the 
number of nodes in the cluster - 1 since each has a dedicated thread pair with 
separate thread locals.

Anecdotally it appears that the cost is doubled beyond that although it isn't 
clear why. Possibly the control connections or possibly there is some way in 
which multiple 

Need a workload in CI that tests the advertised limits of cells on a cluster. 
It would be reasonable to ratchet down the max direct memory for the test to 
trigger failures if a memory pooling issue is introduced. I don't think we need 
to test concurrently pulling in a lot of them, but it should at least work 
serially.

The obvious fix to address this issue would be to read in smaller chunks when 
dealing with large values. I think small should still be relatively large (4 
megabytes) so that code that is reading from a disk can amortize the cost of a 
seek. It can be hard to tell what the underlying thing being read from is going 
to be in some of the contexts where we might choose to implement switching to 
reading chunks.
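
A sketch of the chunked-read idea from the last paragraph (illustrative; 4 MB 
chunks per the description):
{code}
import java.io.DataInput;
import java.io.IOException;

// Illustrative: read a large value in bounded chunks so the NIO layer never
// allocates (and caches) a value-sized thread-local direct buffer.
public class ChunkedReads
{
    private static final int CHUNK = 4 * 1024 * 1024; // "relatively large" per the description

    public static byte[] readFully(DataInput in, int length) throws IOException
    {
        byte[] bytes = new byte[length];
        for (int offset = 0; offset < length; offset += CHUNK)
        {
            int toRead = Math.min(CHUNK, length - offset);
            in.readFully(bytes, offset, toRead); // each call pins at most CHUNK bytes
        }
        return bytes;
    }
}
{code}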

  was:
If you provide a large byte array to NIO and ask it to populate the byte array 
from a socket it will allocate a thread local byte buffer that is the size of 
the requested read no matter how large it is. Old IO wraps new IO for sockets 
(but not files) so old IO is affected as well.

Even if you are using Buffered{Input | Output}Stream you can end up passing a 
large byte array to NIO. The byte array read method will pass the array to NIO 
directly if it is larger than the internal buffer.  

Passing large cells between nodes as part of intra-cluster messaging can cause 
the NIO pooled buffers to quickly reach a high watermark and stay there. This 
ends up costing 2x the largest cell size because there is a buffer for input 
and output since they are different threads. This is further multiplied by the 
number of nodes in the cluster - 1 since each has a dedicated thread pair with 
separate thread locals.

Anecdotally it appears that the cost is doubled beyond that although it isn't 
clear why. Possibly the control connections or possibly there is some way in 
which multiple 

Need a workload in CI that tests the advertised limits of cells on a cluster. 
It would be reasonable to ratchet down the max direct memory for the test to 
trigger failures if a memory pooling issue is introduced. I don't think we need 
to test concurrently pulling in a lot of them, but it should at least work 
serially.

The obvious fix to address this issue would be to read in smaller chunks when 
dealing with large values. I think small should still be relatively large (4 
megabytes) so that code that is reading from a disk can amortize the cost of a 
seek. It can be hard to tell what the underlying thing being read from is going 
to be in some of the contexts where we might choose to implement switching to 
reading chunks.


 Large columns + NIO memory pooling causes excessive direct memory usage
 ---

 Key: CASSANDRA-8670
 URL: https://issues.apache.org/jira/browse/CASSANDRA-8670
 Project: Cassandra
  Issue Type: Bug
  Components: Core
Reporter: Ariel Weisberg
Assignee: Ariel Weisberg
 Fix For: 3.0


 If you provide a large byte array to NIO and ask it to populate the byte 
 array from a socket it will allocate a thread local byte buffer that is the 
 size of the requested read no matter how large it is. Old IO wraps new IO for 
 sockets (but not files) so old IO is affected as well.
 Even if you are using Buffered{Input | Output}Stream you can end up passing a 
 large byte array to NIO. The byte array read method will pass the array to 
 NIO directly if it is larger than the internal buffer.  
 Passing large cells between nodes as part of intra-cluster messaging can 
 cause the NIO pooled buffers to quickly reach a high watermark and 

[jira] [Commented] (CASSANDRA-8449) Allow zero-copy reads again

2015-03-12 Thread T Jake Luciani (JIRA)

[ 
https://issues.apache.org/jira/browse/CASSANDRA-8449?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=14359329#comment-14359329
 ] 

T Jake Luciani commented on CASSANDRA-8449:
---

bq. For thrift this is trickier

I'm thinking for thrift I just revert to non-zero-copy behavior if the thrift 
rpc is running.

 Allow zero-copy reads again
 ---

 Key: CASSANDRA-8449
 URL: https://issues.apache.org/jira/browse/CASSANDRA-8449
 Project: Cassandra
  Issue Type: Improvement
Reporter: T Jake Luciani
Assignee: T Jake Luciani
Priority: Minor
  Labels: performance
 Fix For: 3.0


 We disabled zero-copy reads in CASSANDRA-3179 due to in-flight reads 
 accessing a ByteBuffer after the data was unmapped by compaction. Currently 
 this code path is only used for uncompressed reads.
 The actual bytes are in fact copied to the client output buffers for both 
 netty and thrift before being sent over the wire, so the only real issue is 
 the time it takes to process the read internally.
 This patch adds a slow network read test and changes the tidy() method to 
 actually delete an sstable only once the readTimeout has elapsed, giving 
 plenty of time to serialize the read.
 Removing this copy causes significantly less GC on the read path and improves 
 the tail latencies:
 http://cstar.datastax.com/graph?stats=c0c8ce16-7fea-11e4-959d-42010af0688f&metric=gc_count&operation=2_read&smoothing=1&show_aggregates=true&xmin=0&xmax=109.34&ymin=0&ymax=5.5



--
This message was sent by Atlassian JIRA
(v6.3.4#6332)


[jira] [Updated] (CASSANDRA-6091) Better Vnode support in hadoop/pig

2015-03-12 Thread mck (JIRA)

 [ 
https://issues.apache.org/jira/browse/CASSANDRA-6091?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

mck updated CASSANDRA-6091:
---
Attachment: cassandra-2.1-6091.txt

 Better Vnode support in hadoop/pig
 --

 Key: CASSANDRA-6091
 URL: https://issues.apache.org/jira/browse/CASSANDRA-6091
 Project: Cassandra
  Issue Type: Bug
  Components: Hadoop
Reporter: Alex Liu
Assignee: mck
 Attachments: cassandra-2.0-6091.txt, cassandra-2.1-6091.txt


 CASSANDRA-6084 shows there are some issues when running hadoop/pig jobs if 
 vnodes are enabled. The hadoop performance of vnode-enabled nodes is also 
 bad, because there are so many splits.
 The idea is to combine vnode splits into big pseudo splits so that hadoop/pig 
 jobs work as if vnodes were disabled.



--
This message was sent by Atlassian JIRA
(v6.3.4#6332)


[jira] [Created] (CASSANDRA-8963) Make OpOrder more intuitive

2015-03-12 Thread Benedict (JIRA)
Benedict created CASSANDRA-8963:
---

 Summary: Make OpOrder more intuitive
 Key: CASSANDRA-8963
 URL: https://issues.apache.org/jira/browse/CASSANDRA-8963
 Project: Cassandra
  Issue Type: Improvement
  Components: Core
Reporter: Benedict
Assignee: Benedict
Priority: Minor
 Fix For: 3.0


There has been plenty of feedback about OpOrder being unintuitive. As well as 
revisiting the naming, I propose to introduce an Action object with RAII 
(AutoCloseable) protection that should be more obvious to users of the API. We 
can then also protect this with a Ref instance for use cases where the action 
lifetime is ill-defined, and perhaps introduce some checks for actions whose 
lifetimes extend beyond a sensible limit, to report those where the object 
reference is retained.
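
For illustration, the call-site shape being proposed might read like this 
(hypothetical names, sketch only):
{code}
// Hypothetical sketch: an AutoCloseable Action so call sites use
// try-with-resources instead of manual start/finish pairing.
public class OpOrderSketch
{
    public interface Action extends AutoCloseable
    {
        @Override
        void close(); // finishes the action; no checked exception
    }

    public Action start()
    {
        // a real implementation would register with the current ordering group
        return new Action()
        {
            public void close() { /* mark the operation complete */ }
        };
    }

    void example()
    {
        try (Action op = start())
        {
            // work guarded by the op order happens here; close() runs on
            // exit, even on exception, which is the RAII property proposed
        }
    }
}
{code}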



--
This message was sent by Atlassian JIRA
(v6.3.4#6332)


[jira] [Comment Edited] (CASSANDRA-6091) Better Vnode support in hadoop/pig

2015-03-12 Thread mck (JIRA)

[ 
https://issues.apache.org/jira/browse/CASSANDRA-6091?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=14359104#comment-14359104
 ] 

mck edited comment on CASSANDRA-6091 at 3/12/15 6:12 PM:
-

new patches on their way for 2.0, 2.1, and trunk.

(there's a silly NPE in CqlRecordReader in the first patch, so I've removed it, 
but I don't know how to transition the issue back to "in progress" or "open" 
status).


was (Author: michaelsembwever):
new patches on their way for both 2.0 and 2.1. 

(there's a silly NPE in CqlRecordReader in the first patch, so I've removed it, 
but I don't know how to transition the issue back to "in progress" or "open" 
status).

 Better Vnode support in hadoop/pig
 --

 Key: CASSANDRA-6091
 URL: https://issues.apache.org/jira/browse/CASSANDRA-6091
 Project: Cassandra
  Issue Type: Bug
  Components: Hadoop
Reporter: Alex Liu
Assignee: mck

 CASSANDRA-6084 shows there are some issues when running hadoop/pig jobs if 
 vnodes are enabled. The hadoop performance of vnode-enabled nodes is also 
 bad, because there are so many splits.
 The idea is to combine vnode splits into big pseudo splits so that hadoop/pig 
 jobs work as if vnodes were disabled.



--
This message was sent by Atlassian JIRA
(v6.3.4#6332)


[jira] [Commented] (CASSANDRA-6091) Better Vnode support in hadoop/pig

2015-03-12 Thread mck (JIRA)

[ 
https://issues.apache.org/jira/browse/CASSANDRA-6091?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=14359104#comment-14359104
 ] 

mck commented on CASSANDRA-6091:


new patches on their way for both 2.0 and 2.1. 

(there's a silly NPE in CqlRecordReader in the first patch, so I've removed it, 
but I don't know how to transition the issue back to "in progress" or "open" 
status).

 Better Vnode support in hadoop/pig
 --

 Key: CASSANDRA-6091
 URL: https://issues.apache.org/jira/browse/CASSANDRA-6091
 Project: Cassandra
  Issue Type: Bug
  Components: Hadoop
Reporter: Alex Liu
Assignee: mck

 CASSANDRA-6084 shows there are some issues when running hadoop/pig jobs if 
 vnodes are enabled. The hadoop performance of vnode-enabled nodes is also 
 bad, because there are so many splits.
 The idea is to combine vnode splits into big pseudo splits so that hadoop/pig 
 jobs work as if vnodes were disabled.



--
This message was sent by Atlassian JIRA
(v6.3.4#6332)


[jira] [Updated] (CASSANDRA-8072) Exception during startup: Unable to gossip with any seeds

2015-03-12 Thread Philip Thompson (JIRA)

 [ 
https://issues.apache.org/jira/browse/CASSANDRA-8072?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Philip Thompson updated CASSANDRA-8072:
---
Reproduced In: 2.1.3, 2.0.11, 2.0.10  (was: 2.0.10, 2.0.11, 2.1.3)
Fix Version/s: 2.0.14
   2.1.4

 Exception during startup: Unable to gossip with any seeds
 -

 Key: CASSANDRA-8072
 URL: https://issues.apache.org/jira/browse/CASSANDRA-8072
 Project: Cassandra
  Issue Type: Bug
Reporter: Ryan Springer
Assignee: Brandon Williams
 Fix For: 2.1.4, 2.0.14

 Attachments: casandra-system-log-with-assert-patch.log


 When Opscenter 4.1.4 or 5.0.1 tries to provision a 2-node DSC 2.0.10 cluster 
 in either ec2 or locally, an error occurs sometimes with one of the nodes 
 refusing to start C*.  The error in the /var/log/cassandra/system.log is:
 ERROR [main] 2014-10-06 15:54:52,292 CassandraDaemon.java (line 513) 
 Exception encountered during startup
 java.lang.RuntimeException: Unable to gossip with any seeds
 at org.apache.cassandra.gms.Gossiper.doShadowRound(Gossiper.java:1200)
 at 
 org.apache.cassandra.service.StorageService.checkForEndpointCollision(StorageService.java:444)
 at 
 org.apache.cassandra.service.StorageService.prepareToJoin(StorageService.java:655)
 at 
 org.apache.cassandra.service.StorageService.initServer(StorageService.java:609)
 at 
 org.apache.cassandra.service.StorageService.initServer(StorageService.java:502)
 at 
 org.apache.cassandra.service.CassandraDaemon.setup(CassandraDaemon.java:378)
 at 
 org.apache.cassandra.service.CassandraDaemon.activate(CassandraDaemon.java:496)
 at 
 org.apache.cassandra.service.CassandraDaemon.main(CassandraDaemon.java:585)
  INFO [StorageServiceShutdownHook] 2014-10-06 15:54:52,326 Gossiper.java 
 (line 1279) Announcing shutdown
  INFO [StorageServiceShutdownHook] 2014-10-06 15:54:54,326 
 MessagingService.java (line 701) Waiting for messaging service to quiesce
  INFO [ACCEPT-localhost/127.0.0.1] 2014-10-06 15:54:54,327 
 MessagingService.java (line 941) MessagingService has terminated the accept() 
 thread
 This error does not always occur when provisioning a 2-node cluster, but it 
 happens roughly half of the time, and only on one of the nodes.  I haven't been 
 able to reproduce this error with DSC 2.0.9, and there have been no code or 
 definition file changes in Opscenter.
 I can reproduce locally with the above steps.  I'm happy to test any proposed 
 fixes since I'm the only person able to reproduce reliably so far.



--
This message was sent by Atlassian JIRA
(v6.3.4#6332)


[jira] [Updated] (CASSANDRA-6091) Better Vnode support in hadoop/pig

2015-03-12 Thread mck (JIRA)

 [ 
https://issues.apache.org/jira/browse/CASSANDRA-6091?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

mck updated CASSANDRA-6091:
---
Attachment: cassandra-2.0-6091.txt

 Better Vnode support in hadoop/pig
 --

 Key: CASSANDRA-6091
 URL: https://issues.apache.org/jira/browse/CASSANDRA-6091
 Project: Cassandra
  Issue Type: Bug
  Components: Hadoop
Reporter: Alex Liu
Assignee: mck
 Attachments: cassandra-2.0-6091.txt


 CASSANDRA-6084 shows there are some issues when running hadoop/pig jobs if 
 vnodes are enabled. The hadoop performance of vnode-enabled nodes is also 
 poor because there are so many splits.
 The idea is to combine vnode splits into big pseudo-splits so that a 
 hadoop/pig job works as if vnodes were disabled.



--
This message was sent by Atlassian JIRA
(v6.3.4#6332)


[jira] [Commented] (CASSANDRA-8951) Add smallint (and possibly byte) type

2015-03-12 Thread Benedict (JIRA)

[ 
https://issues.apache.org/jira/browse/CASSANDRA-8951?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanelfocusedCommentId=14359170#comment-14359170
 ] 

Benedict commented on CASSANDRA-8951:
-

Just a note to start the ball rolling with byte-order comparability for 
smallint...

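For reference, the usual trick for making a fixed-width signed integer byte-order comparable is to flip the sign bit and store big-endian, so that an unsigned lexicographic byte comparison matches numeric order. A minimal sketch, not Cassandra's serializer:

{code}
// Minimal sketch (not Cassandra's serializer): flip the sign bit and store
// big-endian, so unsigned lexicographic byte comparison matches numeric order.
public class ByteComparableSmallint
{
    public static byte[] encode(short v)
    {
        int u = (v ^ 0x8000) & 0xFFFF;               // Short.MIN_VALUE -> 0x0000
        return new byte[] { (byte) (u >>> 8), (byte) u };
    }

    public static short decode(byte[] b)
    {
        int u = ((b[0] & 0xFF) << 8) | (b[1] & 0xFF);
        return (short) (u ^ 0x8000);                 // undo the sign-bit flip
    }

    // unsigned lexicographic comparison, as a byte-order-comparable type needs
    static int compareUnsigned(byte[] a, byte[] b)
    {
        for (int i = 0; i < a.length && i < b.length; i++)
        {
            int d = (a[i] & 0xFF) - (b[i] & 0xFF);
            if (d != 0)
                return d;
        }
        return a.length - b.length;
    }

    public static void main(String[] args)
    {
        // -1 sorts before 1 under the byte comparison, matching numeric order
        System.out.println(compareUnsigned(encode((short) -1), encode((short) 1)) < 0); // true
    }
}
{code}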
 Add smallint (and possibly byte) type
 -

 Key: CASSANDRA-8951
 URL: https://issues.apache.org/jira/browse/CASSANDRA-8951
 Project: Cassandra
  Issue Type: Improvement
Reporter: Sylvain Lebresne

 We have {{int}} and {{bigint}}, but we don't have a {{smallint}} (2 bytes). I 
 see no reason not to add it. And while we're at it, it doesn't cost much to 
 add a {{byte}} type either.



--
This message was sent by Atlassian JIRA
(v6.3.4#6332)


[jira] [Commented] (CASSANDRA-8067) NullPointerException in KeyCacheSerializer

2015-03-12 Thread Davide (JIRA)

[ 
https://issues.apache.org/jira/browse/CASSANDRA-8067?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=14359080#comment-14359080
 ] 

Davide commented on CASSANDRA-8067:
---

Is there a way to fix the issue without the patch? And when is 2.1.4 planned to 
be released? 

 NullPointerException in KeyCacheSerializer
 --

 Key: CASSANDRA-8067
 URL: https://issues.apache.org/jira/browse/CASSANDRA-8067
 Project: Cassandra
  Issue Type: Bug
  Components: Core
Reporter: Eric Leleu
Assignee: Aleksey Yeschenko
 Fix For: 2.1.4

 Attachments: 8067.txt


 Hi,
 I have this stack trace in the logs of Cassandra server (v2.1)
 {code}
 ERROR [CompactionExecutor:14] 2014-10-06 23:32:02,098 
 CassandraDaemon.java:166 - Exception in thread 
 Thread[CompactionExecutor:14,1,main]
 java.lang.NullPointerException: null
 at 
 org.apache.cassandra.service.CacheService$KeyCacheSerializer.serialize(CacheService.java:475)
  ~[apache-cassandra-2.1.0.jar:2.1.0]
 at 
 org.apache.cassandra.service.CacheService$KeyCacheSerializer.serialize(CacheService.java:463)
  ~[apache-cassandra-2.1.0.jar:2.1.0]
 at 
 org.apache.cassandra.cache.AutoSavingCache$Writer.saveCache(AutoSavingCache.java:225)
  ~[apache-cassandra-2.1.0.jar:2.1.0]
 at 
 org.apache.cassandra.db.compaction.CompactionManager$11.run(CompactionManager.java:1061)
  ~[apache-cassandra-2.1.0.jar:2.1.0]
 at java.util.concurrent.Executors$RunnableAdapter.call(Unknown 
 Source) ~[na:1.7.0]
 at java.util.concurrent.FutureTask$Sync.innerRun(Unknown Source) 
 ~[na:1.7.0]
 at java.util.concurrent.FutureTask.run(Unknown Source) ~[na:1.7.0]
 at java.util.concurrent.ThreadPoolExecutor.runWorker(Unknown Source) 
 [na:1.7.0]
 at java.util.concurrent.ThreadPoolExecutor$Worker.run(Unknown Source) 
 [na:1.7.0]
 at java.lang.Thread.run(Unknown Source) [na:1.7.0]
 {code}
 It may not be critical because this error occurred in the AutoSavingCache. 
 However, line 475 involves the CFMetaData, so it may hide a bigger issue...
 {code}
  474 CFMetaData cfm = 
 Schema.instance.getCFMetaData(key.desc.ksname, key.desc.cfname);
  475 cfm.comparator.rowIndexEntrySerializer().serialize(entry, 
 out);
 {code}
 Regards,
 Eric
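
For context, the NPE means {{Schema.instance.getCFMetaData()}} returned null, which happens when the column family was dropped after its keys were cached. A purely illustrative guard over the quoted lines (not the attached 8067.txt patch) would skip such stale entries:

{code}
CFMetaData cfm = Schema.instance.getCFMetaData(key.desc.ksname, key.desc.cfname);
if (cfm == null)
    continue; // hypothetical guard: the CF was dropped, skip the stale cache entry
cfm.comparator.rowIndexEntrySerializer().serialize(entry, out);
{code}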



--
This message was sent by Atlassian JIRA
(v6.3.4#6332)


[jira] [Commented] (CASSANDRA-7816) Duplicate DOWN/UP Events Pushed with Native Protocol

2015-03-12 Thread Jim Witschey (JIRA)

[ 
https://issues.apache.org/jira/browse/CASSANDRA-7816?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=14359129#comment-14359129
 ] 

Jim Witschey commented on CASSANDRA-7816:
-

A few notes from getting the dtests running:

- The documentation changes in trunk-7816.txt have gone out of sync. I can 
`wiggle` them in, but could you rebase them?
- 
[{{move_single_node_test}}|https://github.com/stef1927/cassandra-dtest/blob/7816/pushed_notifications_test.py#L76]
 still fails for me with the patch. You can see the failure I get running 
patched trunk 
[here|https://gist.github.com/mambocab/ee05dd11a974e39a6f25#file-patched-c-patched-tests-trunk].
 I get the same failure on patched 2.0. Is this supposed to still fail? To run 
that test, set the {{DISABLE_VNODES}} environment variable to {{true}} before 
running the tests.

 Duplicate DOWN/UP Events Pushed with Native Protocol
 

 Key: CASSANDRA-7816
 URL: https://issues.apache.org/jira/browse/CASSANDRA-7816
 Project: Cassandra
  Issue Type: Bug
  Components: API
Reporter: Michael Penick
Assignee: Stefania
Priority: Minor
 Fix For: 2.0.13, 2.1.4

 Attachments: 7816-v2.0.txt, tcpdump_repeating_status_change.txt, 
 trunk-7816.txt


 Added MOVED_NODE as a possible type of topology change and also specified 
 that it is possible to receive the same event multiple times.



--
This message was sent by Atlassian JIRA
(v6.3.4#6332)


Git Push Summary

2015-03-12 Thread jake
Repository: cassandra
Updated Tags:  refs/tags/2.0.13-tentative [created] a936d7e7f


cassandra git commit: bump versions

2015-03-12 Thread jake
Repository: cassandra
Updated Branches:
  refs/heads/cassandra-2.0.13 a936d7e7f - 20593c24a


bump versions


Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo
Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/20593c24
Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/20593c24
Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/20593c24

Branch: refs/heads/cassandra-2.0.13
Commit: 20593c24a9f8c07aff8f848f4b5774ed88ea4687
Parents: a936d7e
Author: T Jake Luciani j...@apache.org
Authored: Thu Mar 12 21:01:11 2015 -0400
Committer: T Jake Luciani j...@apache.org
Committed: Thu Mar 12 21:01:11 2015 -0400

--
 build.xml| 2 +-
 debian/changelog | 6 ++
 2 files changed, 7 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/cassandra/blob/20593c24/build.xml
--
diff --git a/build.xml b/build.xml
index 78c6db3..bdbd10a 100644
--- a/build.xml
+++ b/build.xml
@@ -25,7 +25,7 @@
     <property name="debuglevel" value="source,lines,vars"/>
 
     <!-- default version and SCM information -->
-    <property name="base.version" value="2.0.12"/>
+    <property name="base.version" value="2.0.13"/>
     <property name="scm.connection" value="scm:git://git.apache.org/cassandra.git"/>
     <property name="scm.developerConnection" value="scm:git://git.apache.org/cassandra.git"/>
     <property name="scm.url" value="http://git-wip-us.apache.org/repos/asf?p=cassandra.git;a=tree"/>

http://git-wip-us.apache.org/repos/asf/cassandra/blob/20593c24/debian/changelog
--
diff --git a/debian/changelog b/debian/changelog
index 53fa20f..2c80800 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,9 @@
+cassandra (2.0.13) unstable; urgency=medium
+
+  * New release
+
+ -- Jake Luciani j...@apache.org  Thu, 12 Mar 2015 21:00:06 -0400
+
 cassandra (2.0.12) unstable; urgency=medium
 
   * New release 



Git Push Summary

2015-03-12 Thread jake
Repository: cassandra
Updated Tags:  refs/tags/2.0.13-tentative [deleted] a936d7e7f


[jira] [Commented] (CASSANDRA-8067) NullPointerException in KeyCacheSerializer

2015-03-12 Thread Aleksey Yeschenko (JIRA)

[ 
https://issues.apache.org/jira/browse/CASSANDRA-8067?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=14359721#comment-14359721
 ] 

Aleksey Yeschenko commented on CASSANDRA-8067:
--

[~DAddYE] In a week or two.

 NullPointerException in KeyCacheSerializer
 --

 Key: CASSANDRA-8067
 URL: https://issues.apache.org/jira/browse/CASSANDRA-8067
 Project: Cassandra
  Issue Type: Bug
  Components: Core
Reporter: Eric Leleu
Assignee: Aleksey Yeschenko
 Fix For: 2.1.4

 Attachments: 8067.txt


 Hi,
 I have this stack trace in the logs of Cassandra server (v2.1)
 {code}
 ERROR [CompactionExecutor:14] 2014-10-06 23:32:02,098 
 CassandraDaemon.java:166 - Exception in thread 
 Thread[CompactionExecutor:14,1,main]
 java.lang.NullPointerException: null
 at 
 org.apache.cassandra.service.CacheService$KeyCacheSerializer.serialize(CacheService.java:475)
  ~[apache-cassandra-2.1.0.jar:2.1.0]
 at 
 org.apache.cassandra.service.CacheService$KeyCacheSerializer.serialize(CacheService.java:463)
  ~[apache-cassandra-2.1.0.jar:2.1.0]
 at 
 org.apache.cassandra.cache.AutoSavingCache$Writer.saveCache(AutoSavingCache.java:225)
  ~[apache-cassandra-2.1.0.jar:2.1.0]
 at 
 org.apache.cassandra.db.compaction.CompactionManager$11.run(CompactionManager.java:1061)
  ~[apache-cassandra-2.1.0.jar:2.1.0]
 at java.util.concurrent.Executors$RunnableAdapter.call(Unknown 
 Source) ~[na:1.7.0]
 at java.util.concurrent.FutureTask$Sync.innerRun(Unknown Source) 
 ~[na:1.7.0]
 at java.util.concurrent.FutureTask.run(Unknown Source) ~[na:1.7.0]
 at java.util.concurrent.ThreadPoolExecutor.runWorker(Unknown Source) 
 [na:1.7.0]
 at java.util.concurrent.ThreadPoolExecutor$Worker.run(Unknown Source) 
 [na:1.7.0]
 at java.lang.Thread.run(Unknown Source) [na:1.7.0]
 {code}
 It may not be critical because this error occurred in the AutoSavingCache. 
 However, line 475 involves the CFMetaData, so it may hide a bigger issue...
 {code}
  474 CFMetaData cfm = 
 Schema.instance.getCFMetaData(key.desc.ksname, key.desc.cfname);
  475 cfm.comparator.rowIndexEntrySerializer().serialize(entry, 
 out);
 {code}
 Regards,
 Eric



--
This message was sent by Atlassian JIRA
(v6.3.4#6332)


[jira] [Updated] (CASSANDRA-6091) Better Vnode support in hadoop/pig

2015-03-12 Thread mck (JIRA)

 [ 
https://issues.apache.org/jira/browse/CASSANDRA-6091?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

mck updated CASSANDRA-6091:
---
Attachment: trunk-6091.txt

 Better Vnode support in hadoop/pig
 --

 Key: CASSANDRA-6091
 URL: https://issues.apache.org/jira/browse/CASSANDRA-6091
 Project: Cassandra
  Issue Type: Bug
  Components: Hadoop
Reporter: Alex Liu
Assignee: mck
 Attachments: cassandra-2.0-6091.txt, cassandra-2.1-6091.txt, 
 trunk-6091.txt


 CASSANDRA-6084 shows there are some issues when running hadoop/pig jobs if 
 vnodes are enabled. The hadoop performance of vnode-enabled nodes is also 
 poor because there are so many splits.
 The idea is to combine vnode splits into big pseudo-splits so that a 
 hadoop/pig job works as if vnodes were disabled.



--
This message was sent by Atlassian JIRA
(v6.3.4#6332)


[jira] [Updated] (CASSANDRA-8964) SSTable count rises during compactions and max open files exceeded

2015-03-12 Thread Anthony Fisk (JIRA)

 [ 
https://issues.apache.org/jira/browse/CASSANDRA-8964?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Anthony Fisk updated CASSANDRA-8964:

Description: 
LCS compaction was not able to keep up with the prolonged insert load on one of 
our tables, called log, resulting in 2,185 SSTables for that table and 1,779 
pending compactions altogether during a test we were running.

We stopped our load, unthrottled compaction throughput, increased the 
concurrent compactors from 2 to 8, and let it compact the SSTables.

All was going well until the SSTable count for our log table got down to around 
97; it then began rising again until it had reached 758 SSTables 1.5 hours 
later... (we've been recording the cfstats output every half hour, 
[attached|^nodetool_cfstats.zip])

Eventually we exceeded the number of open files:
{code}
ERROR [MemtableFlushWriter:286] 2015-03-12 13:44:36,748 
CassandraDaemon.java:153 - Exception in thread 
Thread[MemtableFlushWriter:286,5,main]
java.lang.RuntimeException: java.io.FileNotFoundException: 
/mnt/cassandra/data/system/compactions_in_progress-55080ab05d9c388690a4acb25fe1f77b/system-compactions_in_progress-tmp-ka-6618-Index.db
 (Too many open files)
at 
org.apache.cassandra.io.util.SequentialWriter.init(SequentialWriter.java:75) 
~[apache-cassandra-2.1.2.jar:2.1.2]
at 
org.apache.cassandra.io.util.SequentialWriter.open(SequentialWriter.java:104) 
~[apache-cassandra-2.1.2.jar:2.1.2]
at 
org.apache.cassandra.io.util.SequentialWriter.open(SequentialWriter.java:99) 
~[apache-cassandra-2.1.2.jar:2.1.2]
at 
org.apache.cassandra.io.sstable.SSTableWriter$IndexWriter.init(SSTableWriter.java:552)
 ~[apache-cassandra-2.1.2.jar:2.1.2]
at 
org.apache.cassandra.io.sstable.SSTableWriter.init(SSTableWriter.java:134) 
~[apache-cassandra-2.1.2.jar:2.1.2]
at 
org.apache.cassandra.db.Memtable$FlushRunnable.createFlushWriter(Memtable.java:390)
 ~[apache-cassandra-2.1.2.jar:2.1.2]
at 
org.apache.cassandra.db.Memtable$FlushRunnable.writeSortedContents(Memtable.java:329)
 ~[apache-cassandra-2.1.2.jar:2.1.2]
at 
org.apache.cassandra.db.Memtable$FlushRunnable.runWith(Memtable.java:313) 
~[apache-cassandra-2.1.2.jar:2.1.2]
at 
org.apache.cassandra.io.util.DiskAwareRunnable.runMayThrow(DiskAwareRunnable.java:48)
 ~[apache-cassandra-2.1.2.jar:2.1.2]
at 
org.apache.cassandra.utils.WrappedRunnable.run(WrappedRunnable.java:28) 
~[apache-cassandra-2.1.2.jar:2.1.2]
at 
com.google.common.util.concurrent.MoreExecutors$SameThreadExecutorService.execute(MoreExecutors.java:297)
 ~[guava-16.0.jar:na]
at 
org.apache.cassandra.db.ColumnFamilyStore$Flush.run(ColumnFamilyStore.java:1037)
 ~[apache-cassandra-2.1.2.jar:2.1.2]
at 
java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145) 
~[na:1.7.0_51]
at 
java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615) 
~[na:1.7.0_51]
at java.lang.Thread.run(Thread.java:744) ~[na:1.7.0_51]
Caused by: java.io.FileNotFoundException: 
/mnt/cassandra/data/system/compactions_in_progress-55080ab05d9c388690a4acb25fe1f77b/system-compactions_in_progress-tmp-ka-6618-Index.db
 (Too many open files)
at java.io.RandomAccessFile.open(Native Method) ~[na:1.7.0_51]
at java.io.RandomAccessFile.init(RandomAccessFile.java:241) 
~[na:1.7.0_51]
at 
org.apache.cassandra.io.util.SequentialWriter.init(SequentialWriter.java:71) 
~[apache-cassandra-2.1.2.jar:2.1.2]
... 14 common frames omitted
ERROR [MemtableFlushWriter:286] 2015-03-12 13:44:36,750 
JVMStabilityInspector.java:94 - JVM state determined to be unstable.  Exiting 
forcefully due to:
java.io.FileNotFoundException: 
/mnt/cassandra/data/system/compactions_in_progress-55080ab05d9c388690a4acb25fe1f77b/system-compactions_in_progress-tmp-ka-6618-Index.db
 (Too many open files)
at java.io.RandomAccessFile.open(Native Method) ~[na:1.7.0_51]
at java.io.RandomAccessFile.init(RandomAccessFile.java:241) 
~[na:1.7.0_51]
at 
org.apache.cassandra.io.util.SequentialWriter.init(SequentialWriter.java:71) 
~[apache-cassandra-2.1.2.jar:2.1.2]
at 
org.apache.cassandra.io.util.SequentialWriter.open(SequentialWriter.java:104) 
~[apache-cassandra-2.1.2.jar:2.1.2]
at 
org.apache.cassandra.io.util.SequentialWriter.open(SequentialWriter.java:99) 
~[apache-cassandra-2.1.2.jar:2.1.2]
at 
org.apache.cassandra.io.sstable.SSTableWriter$IndexWriter.init(SSTableWriter.java:552)
 ~[apache-cassandra-2.1.2.jar:2.1.2]
at 
org.apache.cassandra.io.sstable.SSTableWriter.init(SSTableWriter.java:134) 
~[apache-cassandra-2.1.2.jar:2.1.2]
at 
org.apache.cassandra.db.Memtable$FlushRunnable.createFlushWriter(Memtable.java:390)
 ~[apache-cassandra-2.1.2.jar:2.1.2]
at 

[jira] [Created] (CASSANDRA-8964) SSTable count rises during compactions and max open files exceeded

2015-03-12 Thread Anthony Fisk (JIRA)
Anthony Fisk created CASSANDRA-8964:
---

 Summary: SSTable count rises during compactions and max open files 
exceeded
 Key: CASSANDRA-8964
 URL: https://issues.apache.org/jira/browse/CASSANDRA-8964
 Project: Cassandra
  Issue Type: Bug
 Environment: Apache Cassandra 2.1.2
Centos 6
AWS EC2
i2.2xlarge
Reporter: Anthony Fisk
Priority: Critical
 Attachments: lsof_with_tmp.txt, lsof_without_tmp.txt, 
nodetool_cfstats.zip

LCS compaction was not able to keep up with the prolonged insert load on one of 
our tables, called log, resulting in 2,185 SSTables for that table and 1,779 
pending compactions altogether during a test we were running.

We stopped our load, unthrottled compaction throughput, increased the 
concurrent compactors from 2 to 8, and let it compact the SSTables.

All was going well until the SSTable count for our log table got down to around 
97; it then began rising again until it had reached 758 SSTables 1.5 hours 
later... (we've been recording the cfstats output every half hour, 
[attached|nodetool_cfstats.zip])

Eventually we exceeded the number of open files:
{code}
ERROR [MemtableFlushWriter:286] 2015-03-12 13:44:36,748 
CassandraDaemon.java:153 - Exception in thread 
Thread[MemtableFlushWriter:286,5,main]
java.lang.RuntimeException: java.io.FileNotFoundException: 
/mnt/cassandra/data/system/compactions_in_progress-55080ab05d9c388690a4acb25fe1f77b/system-compactions_in_progress-tmp-ka-6618-Index.db
 (Too many open files)
at 
org.apache.cassandra.io.util.SequentialWriter.init(SequentialWriter.java:75) 
~[apache-cassandra-2.1.2.jar:2.1.2]
at 
org.apache.cassandra.io.util.SequentialWriter.open(SequentialWriter.java:104) 
~[apache-cassandra-2.1.2.jar:2.1.2]
at 
org.apache.cassandra.io.util.SequentialWriter.open(SequentialWriter.java:99) 
~[apache-cassandra-2.1.2.jar:2.1.2]
at 
org.apache.cassandra.io.sstable.SSTableWriter$IndexWriter.init(SSTableWriter.java:552)
 ~[apache-cassandra-2.1.2.jar:2.1.2]
at 
org.apache.cassandra.io.sstable.SSTableWriter.init(SSTableWriter.java:134) 
~[apache-cassandra-2.1.2.jar:2.1.2]
at 
org.apache.cassandra.db.Memtable$FlushRunnable.createFlushWriter(Memtable.java:390)
 ~[apache-cassandra-2.1.2.jar:2.1.2]
at 
org.apache.cassandra.db.Memtable$FlushRunnable.writeSortedContents(Memtable.java:329)
 ~[apache-cassandra-2.1.2.jar:2.1.2]
at 
org.apache.cassandra.db.Memtable$FlushRunnable.runWith(Memtable.java:313) 
~[apache-cassandra-2.1.2.jar:2.1.2]
at 
org.apache.cassandra.io.util.DiskAwareRunnable.runMayThrow(DiskAwareRunnable.java:48)
 ~[apache-cassandra-2.1.2.jar:2.1.2]
at 
org.apache.cassandra.utils.WrappedRunnable.run(WrappedRunnable.java:28) 
~[apache-cassandra-2.1.2.jar:2.1.2]
at 
com.google.common.util.concurrent.MoreExecutors$SameThreadExecutorService.execute(MoreExecutors.java:297)
 ~[guava-16.0.jar:na]
at 
org.apache.cassandra.db.ColumnFamilyStore$Flush.run(ColumnFamilyStore.java:1037)
 ~[apache-cassandra-2.1.2.jar:2.1.2]
at 
java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145) 
~[na:1.7.0_51]
at 
java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615) 
~[na:1.7.0_51]
at java.lang.Thread.run(Thread.java:744) ~[na:1.7.0_51]
Caused by: java.io.FileNotFoundException: 
/mnt/cassandra/data/system/compactions_in_progress-55080ab05d9c388690a4acb25fe1f77b/system-compactions_in_progress-tmp-ka-6618-Index.db
 (Too many open files)
at java.io.RandomAccessFile.open(Native Method) ~[na:1.7.0_51]
at java.io.RandomAccessFile.init(RandomAccessFile.java:241) 
~[na:1.7.0_51]
at 
org.apache.cassandra.io.util.SequentialWriter.init(SequentialWriter.java:71) 
~[apache-cassandra-2.1.2.jar:2.1.2]
... 14 common frames omitted
ERROR [MemtableFlushWriter:286] 2015-03-12 13:44:36,750 
JVMStabilityInspector.java:94 - JVM state determined to be unstable.  Exiting 
forcefully due to:
java.io.FileNotFoundException: 
/mnt/cassandra/data/system/compactions_in_progress-55080ab05d9c388690a4acb25fe1f77b/system-compactions_in_progress-tmp-ka-6618-Index.db
 (Too many open files)
at java.io.RandomAccessFile.open(Native Method) ~[na:1.7.0_51]
at java.io.RandomAccessFile.init(RandomAccessFile.java:241) 
~[na:1.7.0_51]
at 
org.apache.cassandra.io.util.SequentialWriter.init(SequentialWriter.java:71) 
~[apache-cassandra-2.1.2.jar:2.1.2]
at 
org.apache.cassandra.io.util.SequentialWriter.open(SequentialWriter.java:104) 
~[apache-cassandra-2.1.2.jar:2.1.2]
at 
org.apache.cassandra.io.util.SequentialWriter.open(SequentialWriter.java:99) 
~[apache-cassandra-2.1.2.jar:2.1.2]
at 
org.apache.cassandra.io.sstable.SSTableWriter$IndexWriter.init(SSTableWriter.java:552)
 ~[apache-cassandra-2.1.2.jar:2.1.2]
at 

[jira] [Commented] (CASSANDRA-8961) Data rewrite case causes almost non-functional compaction

2015-03-12 Thread Dan Kinder (JIRA)

[ 
https://issues.apache.org/jira/browse/CASSANDRA-8961?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=14359540#comment-14359540
 ] 

Dan Kinder commented on CASSANDRA-8961:
---

I see. Is there some way to make this DELETE query not use RangeTombstones? 
Would it work to name the full set of columns (e.g. DELETE pk, data FROM ...)?

Also CASSANDRA-6446 seems related.

 Data rewrite case causes almost non-functional compaction
 -

 Key: CASSANDRA-8961
 URL: https://issues.apache.org/jira/browse/CASSANDRA-8961
 Project: Cassandra
  Issue Type: Bug
 Environment: Centos 6.6, Cassandra 2.0.12 (Also seen in Cassandra 2.1)
Reporter: Dan Kinder
Priority: Minor

 There seems to be a bug of some kind where compaction grinds to a halt in 
 this use case: from time to time we have a set of rows we need to migrate, 
 changing their primary key by deleting the row and inserting a new row with 
 the same partition key and different cluster key. The python script below 
 demonstrates this; it takes a bit of time to run (didn't try to optimize it) 
 but when it's done it will be trying to compact a few hundred megs of data 
 for a long time... on the order of days, or it will never finish.
 Not verified by this sandboxed experiment, but it seems that compression 
 settings do not matter and that this happens with STCS as well, not just LCS. 
 I am still testing whether other patterns cause this terrible compaction 
 performance, like deleting all rows then inserting, or vice versa.
 Even if it isn't a bug per se, is there a way to fix or work around this 
 behavior?
 {code}
 import string
 import random
 from cassandra.cluster import Cluster
 cluster = Cluster(['localhost'])
 db = cluster.connect('walker')
 db.execute("DROP KEYSPACE IF EXISTS trial")
 db.execute("""CREATE KEYSPACE trial
               WITH REPLICATION = { 'class': 'SimpleStrategy',
                                    'replication_factor': 1 }""")
 db.execute("""CREATE TABLE trial.tbl (
                   pk text,
                   data text,
                   PRIMARY KEY(pk, data)
               ) WITH compaction = { 'class' : 'LeveledCompactionStrategy' }
               AND compression = {'sstable_compression': ''}""")
 # Number of rows to insert and move
 n = 20
 
 # Insert n rows with the same partition key, 1KB of unique data in cluster key
 for i in range(n):
     db.execute("INSERT INTO trial.tbl (pk, data) VALUES ('thepk', %s)",
                [str(i).zfill(1024)])
 # Update those n rows, deleting each and replacing with a very similar row
 for i in range(n):
     val = str(i).zfill(1024)
     db.execute("DELETE FROM trial.tbl WHERE pk = 'thepk' AND data = %s",
                [val])
     db.execute("INSERT INTO trial.tbl (pk, data) VALUES ('thepk', %s)",
                ["1" + val])
 {code}



--
This message was sent by Atlassian JIRA
(v6.3.4#6332)


[jira] [Commented] (CASSANDRA-8819) LOCAL_QUORUM writes returns wrong message

2015-03-12 Thread Alan Boudreault (JIRA)

[ 
https://issues.apache.org/jira/browse/CASSANDRA-8819?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=14359577#comment-14359577
 ] 

Alan Boudreault commented on CASSANDRA-8819:


FYI, a dtest has been created for this ticket: 
https://github.com/riptano/cassandra-dtest/pull/194 , not committed yet but 
will be soon.

 LOCAL_QUORUM writes returns wrong message
 -

 Key: CASSANDRA-8819
 URL: https://issues.apache.org/jira/browse/CASSANDRA-8819
 Project: Cassandra
  Issue Type: Bug
  Components: Core
 Environment: CentOS 6.6
Reporter: Wei Zhu
Assignee: Sylvain Lebresne
 Fix For: 2.0.13

 Attachments: 8819-2.0.patch


 We have two DCs, each with 7 nodes.
 Here is the keyspace setup:
  create keyspace test
  with placement_strategy = 'NetworkTopologyStrategy'
  and strategy_options = {DC2 : 3, DC1 : 3}
  and durable_writes = true;
 We brought down two nodes in DC2 for maintenance. We only write to DC1 using 
 local_quorum (using the DataStax Java client).
 But we see these errors in the log:
 Cassandra timeout during write query at consistency LOCAL_QUORUM (4 replica 
 were required but only 3 acknowledged the write)
 Why does it say 4 replicas were required? And why would it return an error to 
 the client, since local_quorum should succeed?
 Here is the output from nodetool status:
 Note: Ownership information does not include topology; for complete 
 information, specify a keyspace
 Datacenter: DC2
 ===
 Status=Up/Down
 |/ State=Normal/Leaving/Joining/Moving
 --  Address   Load      Tokens  Owns   Host ID   Rack
 UN  10.2.0.1  10.92 GB  256     7.9%             RAC206
 UN  10.2.0.2  6.17 GB   256     8.0%             RAC106
 UN  10.2.0.3  6.63 GB   256     7.3%             RAC107
 DL  10.2.0.4  1.54 GB   256     7.7%             RAC107
 UN  10.2.0.5  6.02 GB   256     6.6%             RAC106
 UJ  10.2.0.6  3.68 GB   256     ?                RAC205
 UN  10.2.0.7  7.22 GB   256     7.7%             RAC205
 Datacenter: DC1
 ===
 Status=Up/Down
 |/ State=Normal/Leaving/Joining/Moving
 --  Address   Load      Tokens  Owns   Host ID   Rack
 UN  10.1.0.1  6.04 GB   256     8.6%             RAC10
 UN  10.1.0.2  7.55 GB   256     7.4%             RAC8
 UN  10.1.0.3  5.83 GB   256     7.0%             RAC9
 UN  10.1.0.4  7.34 GB   256     7.9%             RAC6
 UN  10.1.0.5  7.57 GB   256     8.0%             RAC7
 UN  10.1.0.6  5.31 GB   256     7.3%             RAC10
 UN  10.1.0.7  5.47 GB   256     8.6%             RAC9
 I did a cql trace on the query, and the trace does say 
Write timeout; received 3 of 4 required replies | 17:27:52,831 |  10.1.0.1 
 |2002873
 at the end. I guess that is where the client gets the error from. But the 
 row was inserted into Cassandra correctly. I also traced a read with 
 local_quorum and it behaves correctly; the reads don't go to DC2. The problem 
 is only with writes at local_quorum.
 {code}
 Tracing session: 5a789fb0-b70d-11e4-8fca-99bff9c19890
  activity 
| timestamp
 | source  | source_elapsed
 -+--+-+
   
 execute_cql3_query | 17:27:50,828 
 |  10.1.0.1 |  0
  Parsing insert into test (user_id, created, event_data, event_id)values ( 
 123456789 , 9eab8950-b70c-11e4-8fca-99bff9c19891, 'test', '16'); | 
 17:27:50,828 |  10.1.0.1 | 39
   
Preparing statement | 17:27:50,828 
 |  10.1.0.1 |135
   
  Message received from /10.1.0.1 | 17:27:50,829 | 
  10.1.0.5 | 25
   
 Sending message to /10.1.0.5 | 17:27:50,829 | 
  10.1.0.1 |421
   
  

[jira] [Commented] (CASSANDRA-8959) More efficient frozen UDT and tuple serialization format

2015-03-12 Thread Benedict (JIRA)

[ 
https://issues.apache.org/jira/browse/CASSANDRA-8959?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=14358445#comment-14358445
 ] 

Benedict commented on CASSANDRA-8959:
-

Shouldn't this just be a replication of whatever strategy we choose for 
encoding tables? 

 More efficient frozen UDT and tuple serialization format
 

 Key: CASSANDRA-8959
 URL: https://issues.apache.org/jira/browse/CASSANDRA-8959
 Project: Cassandra
  Issue Type: Improvement
Reporter: Aleksey Yeschenko
  Labels: performance
 Fix For: 3.1


 The current serialization format for UDTs has a fixed overhead of 4 bytes per 
 defined field (encoding the size of the field).
 It is inefficient for sparse UDTs - ones with many defined fields, but few of 
 them present. We could keep a bitset to indicate the missing fields, if any.
 It's sub-optimal for encoding UDTs with all the values present as well. We 
 could use varint encoding for the field sizes of blob/text fields and encode 
 fixed-size types directly, without the 4-byte size prologue.
 That or something more brilliant. Any improvement right now is low-hanging 
 fruit.
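
A minimal sketch of the combined idea (illustrative only, not a proposed wire format): a presence bitmap covers all defined fields, and each present variable-length value gets an unsigned-varint size prefix instead of a fixed 4 bytes. Decoding walks the same bitmap, so an absent field costs one bit rather than four bytes.

{code}
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;

// Illustrative sketch of the proposed direction (not Cassandra's format):
// presence bitmap over all N defined fields, then varint-sized values for
// the fields that are actually present.
public class SparseUdtEncoder
{
    public static byte[] encode(ByteBuffer[] fields) throws IOException
    {
        ByteArrayOutputStream out = new ByteArrayOutputStream();

        // presence bitmap: one bit per defined field, null means "not present"
        byte[] bitmap = new byte[(fields.length + 7) / 8];
        for (int i = 0; i < fields.length; i++)
            if (fields[i] != null)
                bitmap[i / 8] |= 1 << (i % 8);
        out.write(bitmap);

        // varint-prefixed values for present fields only
        for (ByteBuffer f : fields)
        {
            if (f == null)
                continue;
            writeUnsignedVarint(out, f.remaining());
            byte[] v = new byte[f.remaining()];
            f.duplicate().get(v);
            out.write(v);
        }
        return out.toByteArray();
    }

    static void writeUnsignedVarint(ByteArrayOutputStream out, int value)
    {
        while ((value & ~0x7F) != 0)
        {
            out.write((value & 0x7F) | 0x80);
            value >>>= 7;
        }
        out.write(value);
    }
}
{code}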



--
This message was sent by Atlassian JIRA
(v6.3.4#6332)


[jira] [Created] (CASSANDRA-8960) Suspect SSTable status is lost when rewriter is aborted

2015-03-12 Thread Branimir Lambov (JIRA)
Branimir Lambov created CASSANDRA-8960:
--

 Summary: Suspect SSTable status is lost when rewriter is aborted
 Key: CASSANDRA-8960
 URL: https://issues.apache.org/jira/browse/CASSANDRA-8960
 Project: Cassandra
  Issue Type: Bug
Reporter: Branimir Lambov
Priority: Minor


This can cause repeated compaction failures and buildup if an SSTable opens 
correctly but fails during iteration. The exception will trigger a 
{{writer.abort()}} in {{CompactionTask}}, which in turn will replace suspect 
tables with clones obtained through {{cloneWithNewStart()}}. The latter does 
not copy suspect status, hence the node no longer knows that reading from this 
table has failed.
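
In outline, with hypothetical names rather than the actual {{SSTableReader}} code, the missing propagation looks like this:

{code}
// Hypothetical names only, not the actual SSTableReader code: the point is
// that cloneWithNewStart() must carry the suspect flag to the replacement.
public class SuspectTrackingTable
{
    private volatile boolean isSuspect;

    public void markSuspect()        { isSuspect = true; }
    public boolean isMarkedSuspect() { return isSuspect; }

    public SuspectTrackingTable cloneWithNewStart()
    {
        SuspectTrackingTable clone = new SuspectTrackingTable();
        clone.isSuspect = this.isSuspect; // the propagation the report says is lost
        return clone;
    }
}
{code}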



--
This message was sent by Atlassian JIRA
(v6.3.4#6332)


[1/3] cassandra git commit: pass correct (but unused in this context) value to SSTableWriter.afterAppend, and rename parameter name

2015-03-12 Thread benedict
Repository: cassandra
Updated Branches:
  refs/heads/cassandra-2.1 4831ba14a - cbd4de8f5
  refs/heads/trunk 24915cd48 - e5d119aab


pass correct (but unused in this context) value to SSTableWriter.afterAppend, 
and rename parameter name


Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo
Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/cbd4de8f
Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/cbd4de8f
Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/cbd4de8f

Branch: refs/heads/cassandra-2.1
Commit: cbd4de8f51cf0da1579abcd8d93803c4e8845024
Parents: 4831ba1
Author: Benedict Elliott Smith bened...@apache.org
Authored: Thu Mar 12 12:26:03 2015 +
Committer: Benedict Elliott Smith bened...@apache.org
Committed: Thu Mar 12 12:26:03 2015 +

--
 .../apache/cassandra/io/sstable/SSTableWriter.java| 14 --
 1 file changed, 8 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/cassandra/blob/cbd4de8f/src/java/org/apache/cassandra/io/sstable/SSTableWriter.java
--
diff --git a/src/java/org/apache/cassandra/io/sstable/SSTableWriter.java 
b/src/java/org/apache/cassandra/io/sstable/SSTableWriter.java
index b35b652..440961f 100644
--- a/src/java/org/apache/cassandra/io/sstable/SSTableWriter.java
+++ b/src/java/org/apache/cassandra/io/sstable/SSTableWriter.java
@@ -173,7 +173,7 @@ public class SSTableWriter extends SSTable
         return (lastWrittenKey == null) ? 0 : dataFile.getFilePointer();
     }
 
-    private void afterAppend(DecoratedKey decoratedKey, long dataPosition, RowIndexEntry index)
+    private void afterAppend(DecoratedKey decoratedKey, long dataEnd, RowIndexEntry index)
     {
         sstableMetadataCollector.addKey(decoratedKey.getKey());
         lastWrittenKey = decoratedKey;
@@ -182,9 +182,9 @@ public class SSTableWriter extends SSTable
             first = lastWrittenKey;
 
         if (logger.isTraceEnabled())
-            logger.trace("wrote " + decoratedKey + " at " + dataPosition);
-        iwriter.append(decoratedKey, index, dataPosition);
-        dbuilder.addPotentialBoundary(dataPosition);
+            logger.trace("wrote " + decoratedKey + " at " + dataEnd);
+        iwriter.append(decoratedKey, index, dataEnd);
+        dbuilder.addPotentialBoundary(dataEnd);
     }
 
     /**
@@ -222,16 +222,18 @@ public class SSTableWriter extends SSTable
         }
 
         long startPosition = beforeAppend(decoratedKey);
+        long endPosition;
         try
         {
             RowIndexEntry entry = rawAppend(cf, startPosition, decoratedKey, dataFile.stream);
-            afterAppend(decoratedKey, startPosition, entry);
+            endPosition = dataFile.getFilePointer();
+            afterAppend(decoratedKey, endPosition, entry);
         }
         catch (IOException e)
         {
             throw new FSWriteError(e, dataFile.getPath());
         }
-        sstableMetadataCollector.update(dataFile.getFilePointer() - startPosition, cf.getColumnStats());
+        sstableMetadataCollector.update(endPosition - startPosition, cf.getColumnStats());
     }
 
     public static RowIndexEntry rawAppend(ColumnFamily cf, long startPosition, DecoratedKey key, DataOutputPlus out) throws IOException



[jira] [Commented] (CASSANDRA-8915) Improve MergeIterator performance

2015-03-12 Thread Benedict (JIRA)

[ 
https://issues.apache.org/jira/browse/CASSANDRA-8915?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=14358578#comment-14358578
 ] 

Benedict commented on CASSANDRA-8915:
-

For this test, I would suggest running with a small memtable size (to amplify 
compaction costs), running stress against a schema with very large partitions 
and multiple clustering components, and using the revisit parameter and the 
sorted visitation order to spread each partition sequentially across multiple 
flushes.

 Improve MergeIterator performance
 -

 Key: CASSANDRA-8915
 URL: https://issues.apache.org/jira/browse/CASSANDRA-8915
 Project: Cassandra
  Issue Type: Improvement
Reporter: Branimir Lambov
Assignee: Branimir Lambov
Priority: Minor

 The implementation of {{MergeIterator}} uses a priority queue and applies a 
 pair of {{poll}}+{{add}} operations for every item in the resulting sequence. 
 This is quite inefficient as {{poll}} necessarily applies at least {{log N}} 
 comparisons (up to {{2log N}}), and {{add}} often requires another {{log N}}, 
 for example in the case where the inputs largely don't overlap (where {{N}} 
 is the number of iterators being merged).
 This can easily be replaced with a simple custom structure that can perform 
 replacement of the top of the queue in a single step, which will very often 
 complete after a couple of comparisons and in the worst case scenarios will 
 match the complexity of the current implementation.
 This should significantly improve merge performance for iterators with 
 limited overlap (e.g. levelled compaction).
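
To make the proposal concrete, a minimal sketch of such a structure (illustrative only, not the attached patch): a plain binary min-heap whose root can be overwritten in place and sifted down once, so advancing the merge costs a single partial sift-down instead of a full {{poll}} plus {{add}}.

{code}
import java.util.Comparator;

// Illustrative sketch only (not the attached patch): a binary min-heap whose
// root can be replaced in place, so each merge step does replaceTop() once
// instead of poll() followed by add().
public class ReplacingHeap<T>
{
    private final Object[] heap;   // fixed capacity: one slot per input iterator
    private int size;
    private final Comparator<? super T> cmp;

    public ReplacingHeap(int capacity, Comparator<? super T> cmp)
    {
        this.heap = new Object[capacity];
        this.cmp = cmp;
    }

    public void add(T item)
    {
        heap[size] = item;
        int i = size++;
        while (i > 0)                       // sift up
        {
            int parent = (i - 1) / 2;
            if (compare(parent, i) <= 0)
                break;
            swap(parent, i);
            i = parent;
        }
    }

    @SuppressWarnings("unchecked")
    public T peek()
    {
        return (T) heap[0];
    }

    // The single-step replacement: overwrite the root, then one sift-down.
    // When the inputs barely overlap this exits after one or two comparisons.
    public void replaceTop(T item)
    {
        heap[0] = item;
        int i = 0;
        while (true)
        {
            int left = 2 * i + 1, right = left + 1, smallest = i;
            if (left < size && compare(left, smallest) < 0)
                smallest = left;
            if (right < size && compare(right, smallest) < 0)
                smallest = right;
            if (smallest == i)
                return;
            swap(i, smallest);
            i = smallest;
        }
    }

    @SuppressWarnings("unchecked")
    private int compare(int a, int b)
    {
        return cmp.compare((T) heap[a], (T) heap[b]);
    }

    private void swap(int a, int b)
    {
        Object t = heap[a];
        heap[a] = heap[b];
        heap[b] = t;
    }
}
{code}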



--
This message was sent by Atlassian JIRA
(v6.3.4#6332)


[3/3] cassandra git commit: Merge branch 'cassandra-2.1' into trunk

2015-03-12 Thread benedict
Merge branch 'cassandra-2.1' into trunk

Conflicts:
src/java/org/apache/cassandra/io/sstable/format/big/BigTableWriter.java


Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo
Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/e5d119aa
Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/e5d119aa
Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/e5d119aa

Branch: refs/heads/trunk
Commit: e5d119aab49b1e19cd67f9d9dc0b0cc44bae90bb
Parents: 24915cd cbd4de8
Author: Benedict Elliott Smith bened...@apache.org
Authored: Thu Mar 12 12:27:36 2015 +
Committer: Benedict Elliott Smith bened...@apache.org
Committed: Thu Mar 12 12:27:36 2015 +

--
 .../io/sstable/format/big/BigTableWriter.java | 14 --
 1 file changed, 8 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/cassandra/blob/e5d119aa/src/java/org/apache/cassandra/io/sstable/format/big/BigTableWriter.java
--
diff --cc 
src/java/org/apache/cassandra/io/sstable/format/big/BigTableWriter.java
index a1b923d,000..ea2549d
mode 100644,00..100644
--- a/src/java/org/apache/cassandra/io/sstable/format/big/BigTableWriter.java
+++ b/src/java/org/apache/cassandra/io/sstable/format/big/BigTableWriter.java
@@@ -1,585 -1,0 +1,587 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
  + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + * http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
  + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.cassandra.io.sstable.format.big;
 +
 +import java.io.DataInput;
 +import java.io.File;
 +import java.io.FileOutputStream;
 +import java.io.IOException;
 +import java.nio.ByteBuffer;
 +import java.util.Collections;
 +import java.util.Iterator;
 +import java.util.List;
 +import java.util.Map;
 +import java.util.Set;
 +
 +import org.apache.cassandra.db.*;
 +import org.apache.cassandra.io.sstable.*;
 +import org.apache.cassandra.io.sstable.format.SSTableReader;
 +import org.apache.cassandra.io.sstable.format.SSTableWriter;
 +import org.apache.cassandra.io.sstable.format.Version;
 +import org.slf4j.Logger;
 +import org.slf4j.LoggerFactory;
 +
 +import org.apache.cassandra.config.CFMetaData;
 +import org.apache.cassandra.config.DatabaseDescriptor;
 +import org.apache.cassandra.db.compaction.AbstractCompactedRow;
 +import org.apache.cassandra.dht.IPartitioner;
 +import org.apache.cassandra.io.FSWriteError;
 +import org.apache.cassandra.io.compress.CompressedSequentialWriter;
 +import org.apache.cassandra.io.sstable.metadata.MetadataCollector;
 +import org.apache.cassandra.io.sstable.metadata.MetadataComponent;
 +import org.apache.cassandra.io.sstable.metadata.MetadataType;
 +import org.apache.cassandra.io.sstable.metadata.StatsMetadata;
 +import org.apache.cassandra.io.util.DataOutputPlus;
 +import org.apache.cassandra.io.util.DataOutputStreamAndChannel;
 +import org.apache.cassandra.io.util.FileMark;
 +import org.apache.cassandra.io.util.FileUtils;
 +import org.apache.cassandra.io.util.SegmentedFile;
 +import org.apache.cassandra.io.util.SequentialWriter;
 +import org.apache.cassandra.utils.ByteBufferUtil;
 +import org.apache.cassandra.utils.FBUtilities;
 +import org.apache.cassandra.utils.FilterFactory;
 +import org.apache.cassandra.utils.IFilter;
 +import org.apache.cassandra.utils.Pair;
 +import org.apache.cassandra.utils.StreamingHistogram;
 +
 +public class BigTableWriter extends SSTableWriter
 +{
 +private static final Logger logger = 
LoggerFactory.getLogger(BigTableWriter.class);
 +
 +// not very random, but the only value that can't be mistaken for a legal 
column-name length
  +    public static final int END_OF_ROW = 0x0000;
 +
 +private IndexWriter iwriter;
 +private SegmentedFile.Builder dbuilder;
 +private final SequentialWriter dataFile;
 +private DecoratedKey lastWrittenKey;
 +private FileMark dataMark;
 +
 +BigTableWriter(Descriptor descriptor, Long keyCount, Long repairedAt, 
CFMetaData metadata, IPartitioner partitioner, MetadataCollector 
metadataCollector)
 +{
 +super(descriptor, keyCount, repairedAt, metadata, partitioner, 

[2/3] cassandra git commit: pass correct (but unused in this context) value to SSTableWriter.afterAppend, and rename parameter name

2015-03-12 Thread benedict
pass correct (but unused in this context) value to SSTableWriter.afterAppend, 
and rename parameter name


Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo
Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/cbd4de8f
Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/cbd4de8f
Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/cbd4de8f

Branch: refs/heads/trunk
Commit: cbd4de8f51cf0da1579abcd8d93803c4e8845024
Parents: 4831ba1
Author: Benedict Elliott Smith bened...@apache.org
Authored: Thu Mar 12 12:26:03 2015 +
Committer: Benedict Elliott Smith bened...@apache.org
Committed: Thu Mar 12 12:26:03 2015 +

--
 .../apache/cassandra/io/sstable/SSTableWriter.java| 14 --
 1 file changed, 8 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/cassandra/blob/cbd4de8f/src/java/org/apache/cassandra/io/sstable/SSTableWriter.java
--
diff --git a/src/java/org/apache/cassandra/io/sstable/SSTableWriter.java 
b/src/java/org/apache/cassandra/io/sstable/SSTableWriter.java
index b35b652..440961f 100644
--- a/src/java/org/apache/cassandra/io/sstable/SSTableWriter.java
+++ b/src/java/org/apache/cassandra/io/sstable/SSTableWriter.java
@@ -173,7 +173,7 @@ public class SSTableWriter extends SSTable
         return (lastWrittenKey == null) ? 0 : dataFile.getFilePointer();
     }
 
-    private void afterAppend(DecoratedKey decoratedKey, long dataPosition, RowIndexEntry index)
+    private void afterAppend(DecoratedKey decoratedKey, long dataEnd, RowIndexEntry index)
     {
         sstableMetadataCollector.addKey(decoratedKey.getKey());
         lastWrittenKey = decoratedKey;
@@ -182,9 +182,9 @@ public class SSTableWriter extends SSTable
             first = lastWrittenKey;
 
         if (logger.isTraceEnabled())
-            logger.trace("wrote " + decoratedKey + " at " + dataPosition);
-        iwriter.append(decoratedKey, index, dataPosition);
-        dbuilder.addPotentialBoundary(dataPosition);
+            logger.trace("wrote " + decoratedKey + " at " + dataEnd);
+        iwriter.append(decoratedKey, index, dataEnd);
+        dbuilder.addPotentialBoundary(dataEnd);
     }
 
     /**
@@ -222,16 +222,18 @@ public class SSTableWriter extends SSTable
         }
 
         long startPosition = beforeAppend(decoratedKey);
+        long endPosition;
         try
        {
             RowIndexEntry entry = rawAppend(cf, startPosition, decoratedKey, dataFile.stream);
-            afterAppend(decoratedKey, startPosition, entry);
+            endPosition = dataFile.getFilePointer();
+            afterAppend(decoratedKey, endPosition, entry);
         }
         catch (IOException e)
         {
             throw new FSWriteError(e, dataFile.getPath());
         }
-        sstableMetadataCollector.update(dataFile.getFilePointer() - startPosition, cf.getColumnStats());
+        sstableMetadataCollector.update(endPosition - startPosition, cf.getColumnStats());
     }
 
     public static RowIndexEntry rawAppend(ColumnFamily cf, long startPosition, DecoratedKey key, DataOutputPlus out) throws IOException



[jira] [Commented] (CASSANDRA-8915) Improve MergeIterator performance

2015-03-12 Thread Branimir Lambov (JIRA)

[ 
https://issues.apache.org/jira/browse/CASSANDRA-8915?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=14358560#comment-14358560
 ] 

Branimir Lambov commented on CASSANDRA-8915:


Patch is available 
[here|https://github.com/apache/cassandra/compare/trunk...blambov:8915-mergeiterator].

The code turned out to be somewhat more complicated than I expected because of 
the need to support lazy advance of the underlying iterators. The new iterator 
is between 10-20% and several times faster, depending on the overlap of the 
inputs and the complexity of the comparisons. I wasn't able to demonstrate the 
effect in cstar_perf (see 
[this|http://cstar.datastax.com/graph?stats=fb747232-c819-11e4-91b4-42010af0688f&metric=op_rate&operation=1_write]
 but also 
[this|http://cstar.datastax.com/graph?stats=ad2dbdb6-c843-11e4-a8b4-42010af0688f&metric=op_rate&operation=1_write]).
 Any suggestions on how to test compaction performance?

At the moment the patch fails {{BlacklistingCompactionsTest}} because of 
CASSANDRA-8960.

 Improve MergeIterator performance
 -

 Key: CASSANDRA-8915
 URL: https://issues.apache.org/jira/browse/CASSANDRA-8915
 Project: Cassandra
  Issue Type: Improvement
Reporter: Branimir Lambov
Assignee: Branimir Lambov
Priority: Minor

 The implementation of {{MergeIterator}} uses a priority queue and applies a 
 pair of {{poll}}+{{add}} operations for every item in the resulting sequence. 
 This is quite inefficient as {{poll}} necessarily applies at least {{log N}} 
 comparisons (up to {{2log N}}), and {{add}} often requires another {{log N}}, 
 for example in the case where the inputs largely don't overlap (where {{N}} 
 is the number of iterators being merged).
 This can easily be replaced with a simple custom structure that can perform 
 replacement of the top of the queue in a single step, which will very often 
 complete after a couple of comparisons and in the worst case scenarios will 
 match the complexity of the current implementation.
 This should significantly improve merge performance for iterators with 
 limited overlap (e.g. levelled compaction).



--
This message was sent by Atlassian JIRA
(v6.3.4#6332)


[jira] [Updated] (CASSANDRA-8058) local consistency level during bootstrap (may cause a write timeout on each write request)

2015-03-12 Thread Alan Boudreault (JIRA)

 [ 
https://issues.apache.org/jira/browse/CASSANDRA-8058?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Alan Boudreault updated CASSANDRA-8058:
---
Tester: Alan Boudreault

 local consistency level during bootstrap (may cause a write timeout on each 
 write request)
 -

 Key: CASSANDRA-8058
 URL: https://issues.apache.org/jira/browse/CASSANDRA-8058
 Project: Cassandra
  Issue Type: Bug
  Components: Core
Reporter: Nicolas DOUILLET
Assignee: Nicolas DOUILLET
 Fix For: 2.0.11, 2.1.1

 Attachments: 
 0001-during-boostrap-block-only-for-local-pending-endpoin.patch.txt, 
 0001-during-boostrap-block-only-for-local-pending-endpoint-v2.patch, 
 0001-during-boostrap-block-only-for-local-pending-endpoints-v2-1.patch


 Hi, 
 During bootstrap, for {{LOCAL_QUORUM}} and {{LOCAL_ONE}} consistencies, the 
 {{DatacenterWriteResponseHandler}} was waiting for pending remote endpoints.
 I think that's a regression, because it seems to have been implemented 
 correctly in CASSANDRA-833 but removed later.
 It was specifically annoying in the case of {{RF=2}} and {{cl=LOCAL_QUORUM}}, 
 because during a bootstrap of a remote node, all requests ended in 
 {{WriteTimeout}}, because they were waiting for a response that would never 
 happen.
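
The essence of the fix, as a hedged sketch with hypothetical types (the real logic lives in the write response handler): when computing how many acknowledgements a {{LOCAL_QUORUM}}/{{LOCAL_ONE}} write must block for, count only the pending endpoints in the coordinator's own datacenter.

{code}
import java.util.List;

// Hedged sketch with hypothetical types (the real logic lives in the write
// response handler): only pending endpoints in the local DC add to blockFor.
public class LocalBlockFor
{
    interface Endpoint
    {
        String datacenter();
    }

    static int blockFor(int localQuorum, List<Endpoint> pendingEndpoints, String localDc)
    {
        int localPending = 0;
        for (Endpoint e : pendingEndpoints)
            if (localDc.equals(e.datacenter()))
                localPending++;           // remote pending endpoints are ignored
        return localQuorum + localPending;
    }
}
{code}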



--
This message was sent by Atlassian JIRA
(v6.3.4#6332)


[jira] [Commented] (CASSANDRA-7168) Add repair aware consistency levels

2015-03-12 Thread Jonathan Ellis (JIRA)

[ 
https://issues.apache.org/jira/browse/CASSANDRA-7168?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=14359581#comment-14359581
 ] 

Jonathan Ellis commented on CASSANDRA-7168:
---

Do we actually need to add a special ConsistencyLevel?  I'd rather just apply 
this as an optimization to all CL > ONE, replacing the data/digest split that 
is almost certainly less useful.

 Add repair aware consistency levels
 ---

 Key: CASSANDRA-7168
 URL: https://issues.apache.org/jira/browse/CASSANDRA-7168
 Project: Cassandra
  Issue Type: Improvement
  Components: Core
Reporter: T Jake Luciani
  Labels: performance
 Fix For: 3.0


 With CASSANDRA-5351 and CASSANDRA-2424 I think there is an opportunity to 
 avoid a lot of extra disk I/O when running queries with higher consistency 
 levels.  
 Since repaired data is by definition consistent and we know which sstables 
 are repaired, we can optimize the read path by having a REPAIRED_QUORUM which 
 breaks reads into two phases:
  
   1) Read from one replica the result from the repaired sstables. 
   2) Read from a quorum only the un-repaired data.
 For the node performing 1) we can pipeline the call so it's a single hop.
 In the long run (assuming data is repaired regularly) we will end up with 
 much closer to CL.ONE performance while maintaining consistency.
 Some things to figure out:
   - If repairs fail on some nodes we can have a situation where we don't have 
 a consistent repaired state across the replicas.  
   
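
As a control-flow sketch only (every type here is hypothetical; this is not a design): phase 1 asks a single replica for the repaired-sstable result, phase 2 runs a quorum read over just the unrepaired data, and the coordinator merges the two partial results.

{code}
import java.util.List;
import java.util.concurrent.CompletableFuture;

// Control-flow sketch only; every type here is hypothetical. Phase 1 reads the
// repaired-sstable result from one replica; phase 2 reads just the unrepaired
// data from a quorum; the coordinator merges the two partial results.
public class RepairedQuorumRead
{
    interface Result
    {
        Result mergeWith(Result other);
    }

    interface Replica
    {
        CompletableFuture<Result> readRepaired(String key);
        CompletableFuture<Result> readUnrepaired(String key);
    }

    static CompletableFuture<Result> read(String key, List<Replica> replicas, int quorum)
    {
        // phase 1: repaired data is consistent by definition, one replica suffices
        CompletableFuture<Result> repaired = replicas.get(0).readRepaired(key);

        // phase 2: quorum over the (hopefully much smaller) unrepaired data
        CompletableFuture<Result> unrepaired = replicas.get(0).readUnrepaired(key);
        for (int i = 1; i < quorum; i++)
            unrepaired = unrepaired.thenCombine(replicas.get(i).readUnrepaired(key),
                                                Result::mergeWith);

        return repaired.thenCombine(unrepaired, Result::mergeWith);
    }
}
{code}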



--
This message was sent by Atlassian JIRA
(v6.3.4#6332)


[jira] [Commented] (CASSANDRA-8058) local consistency level during bootstrap (may cause a write timeout on each write request)

2015-03-12 Thread Alan Boudreault (JIRA)

[ 
https://issues.apache.org/jira/browse/CASSANDRA-8058?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=14359583#comment-14359583
 ] 

Alan Boudreault commented on CASSANDRA-8058:


dtest in progress for this: https://github.com/riptano/cassandra-dtest/pull/194 
. Will mark as qa-resolved when merged.

 local consistency level during bootstrap (may cause a write timeout on each 
 write request)
 -

 Key: CASSANDRA-8058
 URL: https://issues.apache.org/jira/browse/CASSANDRA-8058
 Project: Cassandra
  Issue Type: Bug
  Components: Core
Reporter: Nicolas DOUILLET
Assignee: Nicolas DOUILLET
 Fix For: 2.0.11, 2.1.1

 Attachments: 
 0001-during-boostrap-block-only-for-local-pending-endpoin.patch.txt, 
 0001-during-boostrap-block-only-for-local-pending-endpoint-v2.patch, 
 0001-during-boostrap-block-only-for-local-pending-endpoints-v2-1.patch


 Hi, 
 During bootstrap, for {{LOCAL_QUORUM}} and {{LOCAL_ONE}} consistencies, the 
 {{DatacenterWriteResponseHandler}} was waiting for pending remote endpoints.
 I think that's a regression, because it seems to have been implemented 
 correctly in CASSANDRA-833 but removed later.
 It was specifically annoying in the case of {{RF=2}} and {{cl=LOCAL_QUORUM}}, 
 because during a bootstrap of a remote node, all requests ended in 
 {{WriteTimeout}}, because they were waiting for a response that would never 
 happen.



--
This message was sent by Atlassian JIRA
(v6.3.4#6332)


[jira] [Commented] (CASSANDRA-5310) New authentication module does not work in multi datacenters in case of network outage

2015-03-12 Thread Nick Bailey (JIRA)

[ 
https://issues.apache.org/jira/browse/CASSANDRA-5310?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=14359589#comment-14359589
 ] 

Nick Bailey commented on CASSANDRA-5310:


Can we maybe add some additional error messaging around this? If we get an 
UnavailableException when looking up the 'cassandra' user, log that we have to 
use QUORUM for that user's lookup? It's extremely confusing to run into this 
in the wild.

 New authentication module does not work in multi datacenters in case of 
 network outage
 -

 Key: CASSANDRA-5310
 URL: https://issues.apache.org/jira/browse/CASSANDRA-5310
 Project: Cassandra
  Issue Type: Improvement
Affects Versions: 1.2.2
 Environment: Ubuntu 12.04
 Cluster of 16 nodes in 2 datacenters (8 nodes in each datacenter)
Reporter: jal
Assignee: Aleksey Yeschenko
Priority: Minor
 Fix For: 1.2.3

 Attachments: auth_fix_consistency.patch


 With 1.2.2, I am using the new authentication backend PasswordAuthenticator 
 with the authorizer CassandraAuthorizer
 In case of a network outage, we are no longer able to connect to Cassandra.
 Here is the error message we get when we try to connect through cqlsh:
 Traceback (most recent call last):
   File ./cqlsh, line 2262, in module
 main(*read_options(sys.argv[1:], os.environ))
   File ./cqlsh, line 2248, in main
 display_float_precision=options.float_precision)
   File ./cqlsh, line 483, in __init__
 cql_version=cqlver, transport=transport)
 File ./../lib/cql-internal-only-1.4.0.zip/cql-1.4.0/cql/connection.py, line 
 143, in connect
   File ./../lib/cql-internal-only-1.4.0.zip/cql-1.4.0/cql/connection.py, 
 line 59, in __init__
   File ./../lib/cql-internal-only-1.4.0.zip/cql-1.4.0/cql/thrifteries.py, 
 line 157, in establish_connection
   File 
 ./../lib/cql-internal-only-1.4.0.zip/cql-1.4.0/cql/cassandra/Cassandra.py, 
 line 455, in login
   File 
 ./../lib/cql-internal-only-1.4.0.zip/cql-1.4.0/cql/cassandra/Cassandra.py, 
 line 476, in recv_login
 cql.cassandra.ttypes.AuthenticationException: 
 AuthenticationException(why='org.apache.cassandra.exceptions.UnavailableException:
  Cannot achieve consistency level QUORUM')



--
This message was sent by Atlassian JIRA
(v6.3.4#6332)

