Merge branch 'cassandra-2.2' into cassandra-3.0

Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo
Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/bd4cab24
Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/bd4cab24
Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/bd4cab24

Branch: refs/heads/cassandra-3.5
Commit: bd4cab24bb9582d15d6b4cf251e7da21230f1460
Parents: be38081 caaa9fc
Author: Josh McKenzie <josh.mcken...@datastax.com>
Authored: Fri Apr 1 11:47:39 2016 -0400
Committer: Josh McKenzie <josh.mcken...@datastax.com>
Committed: Fri Apr 1 11:49:41 2016 -0400

----------------------------------------------------------------------
 CHANGES.txt                                     |   1 +
 .../apache/cassandra/db/ColumnFamilyStore.java  |  24 ++-
 .../org/apache/cassandra/db/Directories.java    |   2 +-
 .../db/commitlog/CommitLogSegmentManager.java   |   4 +-
 .../cassandra/io/util/DiskAwareRunnable.java    |   5 +-
 .../apache/cassandra/cql3/OutOfSpaceTest.java   | 157 +++++++++++++++++++
 6 files changed, 185 insertions(+), 8 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cassandra/blob/bd4cab24/CHANGES.txt
----------------------------------------------------------------------
diff --cc CHANGES.txt
index 7fc628e,78ea961..b9376bc
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@@ -75,8 -38,10 +75,9 @@@ Merged from 2.2
   * (cqlsh) Support utf-8/cp65001 encoding on Windows (CASSANDRA-11030)
   * Fix paging on DISTINCT queries repeats result when first row in partition 
changes
     (CASSANDRA-10010)
 + * cqlsh: change default encoding to UTF-8 (CASSANDRA-11124)
  Merged from 2.1:
 - * Add a -j parameter to scrub/cleanup/upgradesstables to state how
 -   many threads to use (CASSANDRA-11179)
 - * Backport CASSANDRA-10679 (CASSANDRA-9598)
++ * Fix out-of-space error treatment in memtable flushing (CASSANDRA-11448).
   * Don't do defragmentation if reading from repaired sstables 
(CASSANDRA-10342)
   * Fix streaming_socket_timeout_in_ms not enforced (CASSANDRA-11286)
   * Avoid dropping message too quickly due to missing unit conversion 
(CASSANDRA-11302)

http://git-wip-us.apache.org/repos/asf/cassandra/blob/bd4cab24/src/java/org/apache/cassandra/db/ColumnFamilyStore.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/cassandra/blob/bd4cab24/src/java/org/apache/cassandra/db/Directories.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/cassandra/blob/bd4cab24/src/java/org/apache/cassandra/db/commitlog/CommitLogSegmentManager.java
----------------------------------------------------------------------
diff --cc 
src/java/org/apache/cassandra/db/commitlog/CommitLogSegmentManager.java
index 8a8d0e7,636c73b..2ee4eed
--- a/src/java/org/apache/cassandra/db/commitlog/CommitLogSegmentManager.java
+++ b/src/java/org/apache/cassandra/db/commitlog/CommitLogSegmentManager.java
@@@ -34,8 -34,10 +34,9 @@@ import java.util.concurrent.LinkedBlock
  import java.util.concurrent.TimeUnit;
  import java.util.concurrent.atomic.AtomicLong;
  
+ import com.google.common.annotations.VisibleForTesting;
  import com.google.common.collect.Iterables;
  import com.google.common.util.concurrent.*;
 -
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  

http://git-wip-us.apache.org/repos/asf/cassandra/blob/bd4cab24/test/unit/org/apache/cassandra/cql3/OutOfSpaceTest.java
----------------------------------------------------------------------
diff --cc test/unit/org/apache/cassandra/cql3/OutOfSpaceTest.java
index 0000000,8304aff..1527b1e
mode 000000,100644..100644
--- a/test/unit/org/apache/cassandra/cql3/OutOfSpaceTest.java
+++ b/test/unit/org/apache/cassandra/cql3/OutOfSpaceTest.java
@@@ -1,0 -1,157 +1,157 @@@
+ /*
+  * Licensed to the Apache Software Foundation (ASF) under one
+  * or more contributor license agreements.  See the NOTICE file
+  * distributed with this work for additional information
+  * regarding copyright ownership.  The ASF licenses this file
+  * to you under the Apache License, Version 2.0 (the
+  * "License"); you may not use this file except in compliance
+  * with the License.  You may obtain a copy of the License at
+  *
+  *     http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ package org.apache.cassandra.cql3;
+ 
+ import static junit.framework.Assert.fail;
+ 
+ import java.io.IOError;
+ import java.util.UUID;
+ import java.util.concurrent.ExecutionException;
+ 
+ import org.junit.Assert;
+ import org.junit.Test;
+ 
+ import org.apache.cassandra.config.Config.DiskFailurePolicy;
+ import org.apache.cassandra.config.DatabaseDescriptor;
+ import org.apache.cassandra.db.BlacklistedDirectories;
+ import org.apache.cassandra.db.ColumnFamilyStore;
+ import org.apache.cassandra.db.Directories.DataDirectory;
+ import org.apache.cassandra.db.commitlog.CommitLog;
+ import org.apache.cassandra.db.commitlog.CommitLogSegment;
+ import org.apache.cassandra.db.Keyspace;
+ import org.apache.cassandra.gms.Gossiper;
+ import org.apache.cassandra.io.FSWriteError;
+ import org.apache.cassandra.utils.JVMStabilityInspector;
+ import org.apache.cassandra.utils.KillerForTests;
+ 
+ /**
+  * Test that errors during memtable flush are handled according to the
+  * configured disk failure policy (die / stop / ignore).
+  */
+ public class OutOfSpaceTest extends CQLTester
+ {
+     @Test
+     public void testFlushUnwriteableDie() throws Throwable
+     {
+         makeTable();
+         markDirectoriesUnwriteable();
+ 
+         KillerForTests killerForTests = new KillerForTests();
+         JVMStabilityInspector.Killer originalKiller = 
JVMStabilityInspector.replaceKiller(killerForTests);
+         DiskFailurePolicy oldPolicy = 
DatabaseDescriptor.getDiskFailurePolicy();
+         try
+         {
+             DatabaseDescriptor.setDiskFailurePolicy(DiskFailurePolicy.die);
+             flushAndExpectError();
+             Assert.assertTrue(killerForTests.wasKilled());
+             Assert.assertFalse(killerForTests.wasKilledQuietly()); //only 
killed quietly on startup failure
+         }
+         finally
+         {
+             DatabaseDescriptor.setDiskFailurePolicy(oldPolicy);
+             JVMStabilityInspector.replaceKiller(originalKiller);
+         }
+     }
+ 
+     @Test
+     public void testFlushUnwriteableStop() throws Throwable
+     {
+         makeTable();
+         markDirectoriesUnwriteable();
+ 
+         DiskFailurePolicy oldPolicy = 
DatabaseDescriptor.getDiskFailurePolicy();
+         try
+         {
+             DatabaseDescriptor.setDiskFailurePolicy(DiskFailurePolicy.stop);
+             flushAndExpectError();
+             Assert.assertFalse(Gossiper.instance.isEnabled());
+         }
+         finally
+         {
+             DatabaseDescriptor.setDiskFailurePolicy(oldPolicy);
+         }
+     }
+ 
+     @Test
+     public void testFlushUnwriteableIgnore() throws Throwable
+     {
+         makeTable();
+         markDirectoriesUnwriteable();
+ 
+         DiskFailurePolicy oldPolicy = 
DatabaseDescriptor.getDiskFailurePolicy();
+         try
+         {
+             DatabaseDescriptor.setDiskFailurePolicy(DiskFailurePolicy.ignore);
+             flushAndExpectError();
+         }
+         finally
+         {
+             DatabaseDescriptor.setDiskFailurePolicy(oldPolicy);
+         }
+ 
+         // Next flush should succeed.
+         makeTable();
+         flush();
+     }
+ 
+     public void makeTable() throws Throwable
+     {
+         createTable("CREATE TABLE %s (a text, b text, c text, PRIMARY KEY (a, 
b));");
+ 
+         // insert some rows so there is data in the memtable to flush
+         for (int i = 0; i < 10; i++)
+             execute("INSERT INTO %s (a, b, c) VALUES ('key', 'column" + i + 
"', null);");
+     }
+ 
+     public void markDirectoriesUnwriteable()
+     {
+         ColumnFamilyStore cfs = 
Keyspace.open(keyspace()).getColumnFamilyStore(currentTable());
+         try
+         {
+             for ( ; ; )
+             {
 -                DataDirectory dir = cfs.directories.getWriteableLocation(1);
 -                
BlacklistedDirectories.maybeMarkUnwritable(cfs.directories.getLocationForDisk(dir));
++                DataDirectory dir = 
cfs.getDirectories().getWriteableLocation(1);
++                
BlacklistedDirectories.maybeMarkUnwritable(cfs.getDirectories().getLocationForDisk(dir));
+             }
+         }
+         catch (IOError e)
+         {
+             // Expected -- marked all directories as unwritable
+         }
+     }
+ 
+     public void flushAndExpectError() throws InterruptedException, 
ExecutionException
+     {
+         try
+         {
+             
Keyspace.open(KEYSPACE).getColumnFamilyStore(currentTable()).forceFlush().get();
+             fail("FSWriteError expected.");
+         }
+         catch (ExecutionException e)
+         {
+             // Correct path.
+             Assert.assertTrue(e.getCause() instanceof FSWriteError);
+         }
+ 
+         // Make sure commit log wasn't discarded.
+         UUID cfid = currentTableMetadata().cfId;
+         for (CommitLogSegment segment : 
CommitLog.instance.allocator.getActiveSegments())
+             if (segment.getDirtyCFIDs().contains(cfid))
+                 return;
+         fail("Expected commit log to remain dirty for the affected table.");
+     }
+ }

Reply via email to