Add a separate production debug log for troubleshooting

patch by Paulo Motta; reviewed by Ariel Weisberg for CASSANDRA-10241


Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo
Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/4a849efe
Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/4a849efe
Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/4a849efe

Branch: refs/heads/cassandra-3.0
Commit: 4a849efeb7c7c1a54bc12094d2d6a9f3f008a2fa
Parents: f3ad68c
Author: Paulo Motta <pauloricard...@gmail.com>
Authored: Tue Sep 22 22:00:23 2015 +0200
Committer: blerer <benjamin.le...@datastax.com>
Committed: Tue Sep 22 22:00:23 2015 +0200

----------------------------------------------------------------------
 NEWS.txt                                        | 10 ++++
 conf/logback.xml                                | 55 ++++++++++++++++---
 .../cassandra/auth/CassandraAuthorizer.java     |  2 +-
 .../cassandra/auth/CassandraRoleManager.java    |  4 +-
 .../cassandra/auth/PasswordAuthenticator.java   |  4 +-
 .../apache/cassandra/auth/PermissionsCache.java |  2 +-
 .../org/apache/cassandra/auth/RolesCache.java   |  2 +-
 .../apache/cassandra/cache/AutoSavingCache.java |  8 +--
 .../cassandra/cache/SerializingCache.java       |  2 +-
 .../org/apache/cassandra/client/RingCache.java  |  2 +-
 .../DebuggableScheduledThreadPoolExecutor.java  |  2 +-
 .../DebuggableThreadPoolExecutor.java           |  2 +-
 .../apache/cassandra/cql3/QueryProcessor.java   |  6 +--
 .../cql3/functions/JavaSourceUDFFactory.java    |  4 +-
 .../cql3/functions/ScriptBasedUDF.java          |  2 +-
 .../cql3/statements/CreateIndexStatement.java   |  2 +-
 .../apache/cassandra/db/BatchlogManager.java    | 10 ++--
 .../apache/cassandra/db/ColumnFamilyStore.java  | 50 ++++++++---------
 .../apache/cassandra/db/ConsistencyLevel.java   |  6 +--
 .../db/CounterMutationVerbHandler.java          |  2 +-
 .../db/DefinitionsUpdateVerbHandler.java        |  2 +-
 .../org/apache/cassandra/db/Directories.java    |  8 +--
 .../cassandra/db/HintedHandOffManager.java      | 24 ++++-----
 src/java/org/apache/cassandra/db/Keyspace.java  |  6 +--
 src/java/org/apache/cassandra/db/Memtable.java  | 10 ++--
 .../db/MigrationRequestVerbHandler.java         |  2 +-
 .../cassandra/db/SchemaCheckVerbHandler.java    |  2 +-
 .../cassandra/db/SizeEstimatesRecorder.java     |  6 +--
 .../cassandra/db/SliceFromReadCommand.java      |  2 +-
 .../org/apache/cassandra/db/SystemKeyspace.java |  2 +-
 .../cassandra/db/commitlog/CommitLog.java       |  6 +--
 .../db/commitlog/CommitLogArchiver.java         |  4 +-
 .../db/commitlog/CommitLogReplayer.java         | 22 ++++----
 .../db/commitlog/CommitLogSegmentManager.java   | 14 ++---
 .../db/compaction/CompactionController.java     |  4 +-
 .../db/compaction/CompactionManager.java        | 30 +++++------
 .../cassandra/db/compaction/CompactionTask.java |  8 +--
 .../DateTieredCompactionStrategy.java           | 10 ++--
 .../compaction/LeveledCompactionStrategy.java   |  4 +-
 .../db/compaction/LeveledManifest.java          | 32 +++++------
 .../SizeTieredCompactionStrategy.java           |  4 +-
 .../compaction/WrappingCompactionStrategy.java  |  2 +-
 .../writers/DefaultCompactionWriter.java        |  2 +-
 .../SplittingSizeTieredCompactionWriter.java    |  4 +-
 .../cassandra/db/filter/ExtendedFilter.java     |  2 +-
 .../AbstractSimplePerColumnSecondaryIndex.java  |  8 +--
 .../db/index/composites/CompositesIndex.java    |  4 +-
 .../db/index/composites/CompositesSearcher.java |  6 +--
 .../cassandra/db/index/keys/KeysSearcher.java   |  4 +-
 .../db/lifecycle/LifecycleTransaction.java      | 20 +++----
 .../apache/cassandra/db/lifecycle/Tracker.java  |  8 +--
 .../org/apache/cassandra/dht/BootStrapper.java  |  4 +-
 .../org/apache/cassandra/dht/RangeStreamer.java | 12 ++---
 .../hadoop/AbstractColumnFamilyInputFormat.java |  4 +-
 .../hadoop/ColumnFamilyInputFormat.java         |  4 +-
 .../hadoop/ColumnFamilyOutputFormat.java        |  4 +-
 .../hadoop/ColumnFamilyRecordReader.java        |  8 +--
 .../cassandra/hadoop/cql3/CqlRecordReader.java  |  6 +--
 ...mitedLocalNodeFirstLocalBalancingPolicy.java | 14 ++---
 .../cassandra/hadoop/pig/CassandraStorage.java  |  2 +-
 .../cassandra/hadoop/pig/CqlNativeStorage.java  |  2 +-
 .../io/sstable/IndexSummaryManager.java         | 10 ++--
 .../apache/cassandra/io/sstable/SSTable.java    |  2 +-
 .../io/sstable/format/SSTableReader.java        | 24 ++++-----
 .../io/sstable/metadata/MetadataSerializer.java |  8 +--
 .../org/apache/cassandra/io/util/FileUtils.java |  8 +--
 .../cassandra/io/util/MmappedSegmentedFile.java |  2 +-
 .../locator/AbstractReplicationStrategy.java    |  2 +-
 .../locator/NetworkTopologyStrategy.java        |  2 +-
 .../cassandra/locator/PropertyFileSnitch.java   |  6 +--
 .../locator/ReconnectableSnitchHelper.java      |  2 +-
 .../apache/cassandra/locator/TokenMetadata.java |  8 +--
 .../net/IncomingStreamingConnection.java        |  4 +-
 .../cassandra/net/IncomingTcpConnection.java    | 10 ++--
 .../cassandra/net/MessageDeliveryTask.java      |  2 +-
 .../apache/cassandra/net/MessagingService.java  | 20 +++----
 .../cassandra/net/OutboundTcpConnection.java    | 12 ++---
 .../cassandra/net/ResponseVerbHandler.java      |  2 +-
 .../cassandra/schema/LegacySchemaTables.java    |  2 +-
 .../cassandra/service/FileCacheService.java     |  4 +-
 .../apache/cassandra/service/GCInspector.java   |  4 +-
 .../cassandra/service/LoadBroadcaster.java      |  4 +-
 .../apache/cassandra/service/ReadCallback.java  | 12 ++---
 .../cassandra/service/RowDataResolver.java      | 12 ++---
 .../cassandra/service/RowDigestResolver.java    |  8 +--
 .../apache/cassandra/service/StorageProxy.java  | 14 ++---
 .../cassandra/thrift/CassandraServer.java       | 56 ++++++++++----------
 .../thrift/CustomTThreadPoolServer.java         |  4 +-
 .../cassandra/thrift/ThriftValidation.java      |  4 +-
 .../org/apache/cassandra/tracing/Tracing.java   |  2 +-
 .../org/apache/cassandra/transport/Message.java |  6 +--
 .../cassandra/triggers/CustomClassLoader.java   |  2 +-
 .../org/apache/cassandra/utils/CLibrary.java    |  2 +-
 .../cassandra/utils/EstimatedHistogram.java     |  4 +-
 .../org/apache/cassandra/utils/Mx4jTool.java    |  4 +-
 .../apache/cassandra/utils/OutputHandler.java   |  2 +-
 .../org/apache/cassandra/utils/TopKSampler.java |  2 +-
 97 files changed, 411 insertions(+), 360 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a849efe/NEWS.txt
----------------------------------------------------------------------
diff --git a/NEWS.txt b/NEWS.txt
index 78d90f6..6bd0a77 100644
--- a/NEWS.txt
+++ b/NEWS.txt
@@ -31,12 +31,22 @@ Changed Defaults
      providing the '-full' parameter to nodetool repair.
    - Parallel repairs are the default since 2.2.0, run sequential repairs
      by providing the '-seq' parameter to nodetool repair.
+   - The following INFO logs were reduced to DEBUG level and will now show
+     in debug.log instead of system.log:
+      - Memtable flushing actions
+      - Replayed commit log files
+      - Compacted sstables
+      - SSTable opening (SSTableReader)
 
 New features
 ------------
    - Custom QueryHandlers can retrieve the column specifications for the bound
      variables from QueryOptions by using the hasColumnSpecifications()
      and getColumnSpecifications() methods.
+   - A new default asynchronous log appender, debug.log, was created in addition
+     to the system.log appender in order to provide more detailed logging for
+     debugging. To disable debug logging, comment out the ASYNCDEBUGLOG
+     appender in conf/logback.xml. See CASSANDRA-10241 for more information.
 
 
 2.2.1

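To make the level routing described in the notes above concrete, here is a
minimal SLF4J sketch (hypothetical code, not part of this patch; it assumes the
shipped conf/logback.xml and a class under org.apache.cassandra, whose logger
logback.xml sets to DEBUG):

    package org.apache.cassandra.demo; // hypothetical package, so the org.apache.cassandra DEBUG logger applies

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class LogRoutingSketch
    {
        private static final Logger logger = LoggerFactory.getLogger(LogRoutingSketch.class);

        public static void main(String[] args)
        {
            // INFO passes the ThresholdFilter on SYSTEMLOG/STDOUT and also
            // reaches the unfiltered debug appender: system.log, stdout and debug.log.
            logger.info("appears in system.log and debug.log");

            // DEBUG is dropped by the INFO thresholds on system.log/stdout but
            // accepted by the async debug appender: debug.log only.
            logger.debug("appears only in debug.log");

            // TRACE is below the effective DEBUG level and is discarded; guard
            // expensive message construction, as the hunks below do.
            if (logger.isTraceEnabled())
                logger.trace("dropped unless the level is lowered to TRACE");
        }
    }
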
http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a849efe/conf/logback.xml
----------------------------------------------------------------------
diff --git a/conf/logback.xml b/conf/logback.xml
index eb2dcd1..a47740d 100644
--- a/conf/logback.xml
+++ b/conf/logback.xml
@@ -17,28 +17,67 @@
  under the License.
 -->
 
+<!--
+In order to disable debug.log, comment out the ASYNCDEBUGLOG
+appender reference in the root level section below.
+-->
+
 <configuration scan="true">
   <jmxConfigurator />
-  <appender name="FILE" 
class="ch.qos.logback.core.rolling.RollingFileAppender">
+  <shutdownHook class="ch.qos.logback.core.hook.DelayingShutdownHook"/>
+
+  <!-- SYSTEMLOG rolling file appender to system.log (INFO level) -->
+
+  <appender name="SYSTEMLOG" 
class="ch.qos.logback.core.rolling.RollingFileAppender">
+    <filter class="ch.qos.logback.classic.filter.ThresholdFilter">
+      <level>INFO</level>
+    </filter>
     <file>${cassandra.logdir}/system.log</file>
    <rollingPolicy class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy">
       <fileNamePattern>${cassandra.logdir}/system.log.%i.zip</fileNamePattern>
       <minIndex>1</minIndex>
       <maxIndex>20</maxIndex>
     </rollingPolicy>
+    <triggeringPolicy class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">
+      <maxFileSize>20MB</maxFileSize>
+    </triggeringPolicy>
+    <encoder>
+      <pattern>%-5level [%thread] %date{ISO8601} %F:%L - %msg%n</pattern>
+    </encoder>
+  </appender>
 
+  <!-- DEBUGLOG rolling file appender to debug.log (all levels) -->
+
+  <appender name="DEBUGLOG" 
class="ch.qos.logback.core.rolling.RollingFileAppender">
+    <file>${cassandra.logdir}/debug.log</file>
+    <rollingPolicy class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy">
+      <fileNamePattern>${cassandra.logdir}/debug.log.%i.zip</fileNamePattern>
+      <minIndex>1</minIndex>
+      <maxIndex>20</maxIndex>
+    </rollingPolicy>
    <triggeringPolicy class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">
       <maxFileSize>20MB</maxFileSize>
     </triggeringPolicy>
     <encoder>
       <pattern>%-5level [%thread] %date{ISO8601} %F:%L - %msg%n</pattern>
-      <!-- old-style log format
-      <pattern>%5level [%thread] %date{ISO8601} %F (line %L) %msg%n</pattern>
-      -->
     </encoder>
   </appender>
-  
+
+  <!-- ASYNCDEBUGLOG asynchronous appender to debug.log (all levels) -->
+
+  <appender name="ASYNCDEBUGLOG" class="ch.qos.logback.classic.AsyncAppender">
+    <queueSize>1024</queueSize>
+    <discardingThreshold>0</discardingThreshold>
+    <includeCallerData>true</includeCallerData>
+    <appender-ref ref="DEBUGLOG" />
+  </appender>
+
+  <!-- STDOUT console appender to stdout (INFO level) -->
+
   <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
+    <filter class="ch.qos.logback.classic.filter.ThresholdFilter">
+      <level>INFO</level>
+    </filter>
     <encoder>
       <pattern>%-5level %date{HH:mm:ss,SSS} %msg%n</pattern>
     </encoder>
@@ -49,12 +88,14 @@
    -->
 
   <root level="INFO">
-    <appender-ref ref="FILE" />
+    <appender-ref ref="SYSTEMLOG" />
     <appender-ref ref="STDOUT" />
+    <appender-ref ref="ASYNCDEBUGLOG" /> <!-- Comment this line to disable 
debug.log -->
     <!--
     <appender-ref ref="LogbackMetrics" />
     -->
   </root>
-  
+
+  <logger name="org.apache.cassandra" level="DEBUG"/>
   <logger name="com.thinkaurelius.thrift" level="ERROR"/>
 </configuration>
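
With the configuration above, disabling debug.log is a single edit: comment out
the ASYNCDEBUGLOG reference in the root section of conf/logback.xml. A sketch of
the resulting root section (illustrative only, not part of this patch):

    <root level="INFO">
      <appender-ref ref="SYSTEMLOG" />
      <appender-ref ref="STDOUT" />
      <!-- debug.log disabled:
      <appender-ref ref="ASYNCDEBUGLOG" />
      -->
    </root>

The ASYNCDEBUGLOG appender sets discardingThreshold to 0, which tells logback's
AsyncAppender never to discard TRACE/DEBUG/INFO events when its queue fills up
(by default it starts dropping them once less than 20% of the queue remains),
and enables includeCallerData so the %F:%L fields in the file pattern resolve,
at some performance cost.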

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a849efe/src/java/org/apache/cassandra/auth/CassandraAuthorizer.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/auth/CassandraAuthorizer.java 
b/src/java/org/apache/cassandra/auth/CassandraAuthorizer.java
index 5cdea3f..88069a2 100644
--- a/src/java/org/apache/cassandra/auth/CassandraAuthorizer.java
+++ b/src/java/org/apache/cassandra/auth/CassandraAuthorizer.java
@@ -439,7 +439,7 @@ public class CassandraAuthorizer implements IAuthorizer
         {
             logger.info("Unable to complete conversion of legacy permissions 
data (perhaps not enough nodes are upgraded yet). " +
                         "Conversion should not be considered complete");
-            logger.debug("Conversion error", e);
+            logger.trace("Conversion error", e);
         }
     }
 

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a849efe/src/java/org/apache/cassandra/auth/CassandraRoleManager.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/auth/CassandraRoleManager.java 
b/src/java/org/apache/cassandra/auth/CassandraRoleManager.java
index 9151958..3a59581 100644
--- a/src/java/org/apache/cassandra/auth/CassandraRoleManager.java
+++ b/src/java/org/apache/cassandra/auth/CassandraRoleManager.java
@@ -376,7 +376,7 @@ public class CassandraRoleManager implements IRoleManager
                 // will be finished by then.
                 if (!MessagingService.instance().areAllNodesAtLeast22())
                 {
-                    logger.debug("Not all nodes are upgraded to a version that 
supports Roles yet, rescheduling setup task");
+                    logger.trace("Not all nodes are upgraded to a version that 
supports Roles yet, rescheduling setup task");
                     scheduleSetupTask(setupTask);
                     return;
                 }
@@ -442,7 +442,7 @@ public class CassandraRoleManager implements IRoleManager
         {
             logger.info("Unable to complete conversion of legacy auth data 
(perhaps not enough nodes are upgraded yet). " +
                         "Conversion should not be considered complete");
-            logger.debug("Conversion error", e);
+            logger.trace("Conversion error", e);
             throw e;
         }
     }

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a849efe/src/java/org/apache/cassandra/auth/PasswordAuthenticator.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/auth/PasswordAuthenticator.java 
b/src/java/org/apache/cassandra/auth/PasswordAuthenticator.java
index 87bc073..c0d2283 100644
--- a/src/java/org/apache/cassandra/auth/PasswordAuthenticator.java
+++ b/src/java/org/apache/cassandra/auth/PasswordAuthenticator.java
@@ -86,7 +86,7 @@ public class PasswordAuthenticator implements IAuthenticator
         }
         catch (RequestExecutionException e)
         {
-            logger.debug("Error performing internal authentication", e);
+            logger.trace("Error performing internal authentication", e);
             throw new AuthenticationException(e.toString());
         }
     }
@@ -196,7 +196,7 @@ public class PasswordAuthenticator implements IAuthenticator
          */
         private void decodeCredentials(byte[] bytes) throws AuthenticationException
         {
-            logger.debug("Decoding credentials from client token");
+            logger.trace("Decoding credentials from client token");
             byte[] user = null;
             byte[] pass = null;
             int end = bytes.length;

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a849efe/src/java/org/apache/cassandra/auth/PermissionsCache.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/auth/PermissionsCache.java 
b/src/java/org/apache/cassandra/auth/PermissionsCache.java
index bc96d82..8746b36 100644
--- a/src/java/org/apache/cassandra/auth/PermissionsCache.java
+++ b/src/java/org/apache/cassandra/auth/PermissionsCache.java
@@ -137,7 +137,7 @@ public class PermissionsCache implements 
PermissionsCacheMBean
                                            }
                                            catch (Exception e)
                                            {
-                                               logger.debug("Error performing 
async refresh of user permissions", e);
+                                               logger.trace("Error performing 
async refresh of user permissions", e);
                                                throw e;
                                            }
                                        }

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a849efe/src/java/org/apache/cassandra/auth/RolesCache.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/auth/RolesCache.java 
b/src/java/org/apache/cassandra/auth/RolesCache.java
index 58aa739..554df9e 100644
--- a/src/java/org/apache/cassandra/auth/RolesCache.java
+++ b/src/java/org/apache/cassandra/auth/RolesCache.java
@@ -135,7 +135,7 @@ public class RolesCache implements RolesCacheMBean
                                     return roleManager.getRoles(primaryRole, true);
                                 } catch (Exception e)
                                 {
-                                    logger.debug("Error performing async 
refresh of user roles", e);
+                                    logger.trace("Error performing async 
refresh of user roles", e);
                                     throw e;
                                 }
                             }

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a849efe/src/java/org/apache/cassandra/cache/AutoSavingCache.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/cache/AutoSavingCache.java 
b/src/java/org/apache/cassandra/cache/AutoSavingCache.java
index 48d60b7..c08925d 100644
--- a/src/java/org/apache/cassandra/cache/AutoSavingCache.java
+++ b/src/java/org/apache/cassandra/cache/AutoSavingCache.java
@@ -256,8 +256,8 @@ public class AutoSavingCache<K extends CacheKey, V> extends 
InstrumentingCache<K
                 FileUtils.closeQuietly(in);
             }
         }
-        if (logger.isDebugEnabled())
-            logger.debug("completed reading ({} ms; {} keys) saved cache {}",
+        if (logger.isTraceEnabled())
+            logger.trace("completed reading ({} ms; {} keys) saved cache {}",
                     TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start), count, dataPath);
         return count;
     }
@@ -320,12 +320,12 @@ public class AutoSavingCache<K extends CacheKey, V> 
extends InstrumentingCache<K
 
         public void saveCache()
         {
-            logger.debug("Deleting old {} files.", cacheType);
+            logger.trace("Deleting old {} files.", cacheType);
             deleteOldCacheFiles();
 
             if (!keyIterator.hasNext())
             {
-                logger.debug("Skipping {} save, cache is empty.", cacheType);
+                logger.trace("Skipping {} save, cache is empty.", cacheType);
                 return;
             }
 

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a849efe/src/java/org/apache/cassandra/cache/SerializingCache.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/cache/SerializingCache.java 
b/src/java/org/apache/cassandra/cache/SerializingCache.java
index 0e38922..01d70b4 100644
--- a/src/java/org/apache/cassandra/cache/SerializingCache.java
+++ b/src/java/org/apache/cassandra/cache/SerializingCache.java
@@ -92,7 +92,7 @@ public class SerializingCache<K, V> implements ICache<K, V>
         }
         catch (IOException e)
         {
-            logger.debug("Cannot fetch in memory data, we will fallback to 
read from disk ", e);
+            logger.trace("Cannot fetch in memory data, we will fallback to 
read from disk ", e);
             return null;
         }
     }

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a849efe/src/java/org/apache/cassandra/client/RingCache.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/client/RingCache.java 
b/src/java/org/apache/cassandra/client/RingCache.java
index 094cf4f..5196bce 100644
--- a/src/java/org/apache/cassandra/client/RingCache.java
+++ b/src/java/org/apache/cassandra/client/RingCache.java
@@ -93,7 +93,7 @@ public class RingCache
         }
         catch (TException e)
         {
-            logger.debug("Error contacting seed list {} {}", 
ConfigHelper.getOutputInitialAddress(conf), e.getMessage());
+            logger.trace("Error contacting seed list {} {}", 
ConfigHelper.getOutputInitialAddress(conf), e.getMessage());
         }
     }
 

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a849efe/src/java/org/apache/cassandra/concurrent/DebuggableScheduledThreadPoolExecutor.java
----------------------------------------------------------------------
diff --git 
a/src/java/org/apache/cassandra/concurrent/DebuggableScheduledThreadPoolExecutor.java
 
b/src/java/org/apache/cassandra/concurrent/DebuggableScheduledThreadPoolExecutor.java
index ea0715c..a722b87 100644
--- 
a/src/java/org/apache/cassandra/concurrent/DebuggableScheduledThreadPoolExecutor.java
+++ 
b/src/java/org/apache/cassandra/concurrent/DebuggableScheduledThreadPoolExecutor.java
@@ -54,7 +54,7 @@ public class DebuggableScheduledThreadPoolExecutor extends 
ScheduledThreadPoolEx
                 if (task instanceof Future)
                     ((Future) task).cancel(false);
 
-                logger.debug("ScheduledThreadPoolExecutor has shut down as 
part of C* shutdown");
+                logger.trace("ScheduledThreadPoolExecutor has shut down as 
part of C* shutdown");
             }
             else
             {

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a849efe/src/java/org/apache/cassandra/concurrent/DebuggableThreadPoolExecutor.java
----------------------------------------------------------------------
diff --git 
a/src/java/org/apache/cassandra/concurrent/DebuggableThreadPoolExecutor.java 
b/src/java/org/apache/cassandra/concurrent/DebuggableThreadPoolExecutor.java
index fe6cade..a6d0049 100644
--- a/src/java/org/apache/cassandra/concurrent/DebuggableThreadPoolExecutor.java
+++ b/src/java/org/apache/cassandra/concurrent/DebuggableThreadPoolExecutor.java
@@ -266,7 +266,7 @@ public class DebuggableThreadPoolExecutor extends 
ThreadPoolExecutor implements
             }
             catch (CancellationException e)
             {
-                logger.debug("Task cancelled", e);
+                logger.trace("Task cancelled", e);
             }
             catch (ExecutionException e)
             {

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a849efe/src/java/org/apache/cassandra/cql3/QueryProcessor.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/cql3/QueryProcessor.java 
b/src/java/org/apache/cassandra/cql3/QueryProcessor.java
index 161d8d0..30a111d 100644
--- a/src/java/org/apache/cassandra/cql3/QueryProcessor.java
+++ b/src/java/org/apache/cassandra/cql3/QueryProcessor.java
@@ -618,20 +618,20 @@ public class QueryProcessor implements QueryHandler
 
         public void onUpdateColumnFamily(String ksName, String cfName, boolean columnsDidChange)
         {
-            logger.debug("Column definitions for {}.{} changed, invalidating 
related prepared statements", ksName, cfName);
+            logger.trace("Column definitions for {}.{} changed, invalidating 
related prepared statements", ksName, cfName);
             if (columnsDidChange)
                 removeInvalidPreparedStatements(ksName, cfName);
         }
 
         public void onDropKeyspace(String ksName)
         {
-            logger.debug("Keyspace {} was dropped, invalidating related 
prepared statements", ksName);
+            logger.trace("Keyspace {} was dropped, invalidating related 
prepared statements", ksName);
             removeInvalidPreparedStatements(ksName, null);
         }
 
         public void onDropColumnFamily(String ksName, String cfName)
         {
-            logger.debug("Table {}.{} was dropped, invalidating related 
prepared statements", ksName, cfName);
+            logger.trace("Table {}.{} was dropped, invalidating related 
prepared statements", ksName, cfName);
             removeInvalidPreparedStatements(ksName, cfName);
         }
 

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a849efe/src/java/org/apache/cassandra/cql3/functions/JavaSourceUDFFactory.java
----------------------------------------------------------------------
diff --git 
a/src/java/org/apache/cassandra/cql3/functions/JavaSourceUDFFactory.java 
b/src/java/org/apache/cassandra/cql3/functions/JavaSourceUDFFactory.java
index 97a08b1..c40e031 100644
--- a/src/java/org/apache/cassandra/cql3/functions/JavaSourceUDFFactory.java
+++ b/src/java/org/apache/cassandra/cql3/functions/JavaSourceUDFFactory.java
@@ -170,7 +170,7 @@ public final class JavaSourceUDFFactory
 
         String javaSource = javaSourceBuilder.toString();
 
-        logger.debug("Compiling Java source UDF '{}' as class '{}' using 
source:\n{}", name, targetClassName, javaSource);
+        logger.trace("Compiling Java source UDF '{}' as class '{}' using 
source:\n{}", name, targetClassName, javaSource);
 
         try
         {
@@ -303,7 +303,7 @@ public final class JavaSourceUDFFactory
             if (i > 0)
                 code.append(",\n");
 
-            if (logger.isDebugEnabled())
+            if (logger.isTraceEnabled())
                 code.append("                /* parameter 
'").append(argNames.get(i)).append("' */\n");
 
             code

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a849efe/src/java/org/apache/cassandra/cql3/functions/ScriptBasedUDF.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/cql3/functions/ScriptBasedUDF.java 
b/src/java/org/apache/cassandra/cql3/functions/ScriptBasedUDF.java
index 4d9a79f..e55d450 100644
--- a/src/java/org/apache/cassandra/cql3/functions/ScriptBasedUDF.java
+++ b/src/java/org/apache/cassandra/cql3/functions/ScriptBasedUDF.java
@@ -143,7 +143,7 @@ public class ScriptBasedUDF extends UDFunction
         }
         catch (RuntimeException | ScriptException e)
         {
-            logger.debug("Execution of UDF '{}' failed", name, e);
+            logger.trace("Execution of UDF '{}' failed", name, e);
             throw FunctionExecutionException.create(this, e);
         }
     }

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a849efe/src/java/org/apache/cassandra/cql3/statements/CreateIndexStatement.java
----------------------------------------------------------------------
diff --git 
a/src/java/org/apache/cassandra/cql3/statements/CreateIndexStatement.java 
b/src/java/org/apache/cassandra/cql3/statements/CreateIndexStatement.java
index c3b0993..edc092d 100644
--- a/src/java/org/apache/cassandra/cql3/statements/CreateIndexStatement.java
+++ b/src/java/org/apache/cassandra/cql3/statements/CreateIndexStatement.java
@@ -164,7 +164,7 @@ public class CreateIndexStatement extends 
SchemaAlteringStatement
     {
         CFMetaData cfm = Schema.instance.getCFMetaData(keyspace(), columnFamily()).copy();
         IndexTarget target = rawTarget.prepare(cfm);
-        logger.debug("Updating column {} definition for index {}", 
target.column, indexName);
+        logger.trace("Updating column {} definition for index {}", 
target.column, indexName);
         ColumnDefinition cd = cfm.getColumnDefinition(target.column);
 
         if (cd.getIndexType() != null && ifNotExists)

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a849efe/src/java/org/apache/cassandra/db/BatchlogManager.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/BatchlogManager.java 
b/src/java/org/apache/cassandra/db/BatchlogManager.java
index 1c1b37b..ba56f0d 100644
--- a/src/java/org/apache/cassandra/db/BatchlogManager.java
+++ b/src/java/org/apache/cassandra/db/BatchlogManager.java
@@ -163,7 +163,7 @@ public class BatchlogManager implements BatchlogManagerMBean
 
     private void replayAllFailedBatches() throws ExecutionException, InterruptedException
     {
-        logger.debug("Started replayAllFailedBatches");
+        logger.trace("Started replayAllFailedBatches");
 
        // rate limit is in bytes per second. Uses Double.MAX_VALUE if disabled (set to 0 in cassandra.yaml).
        // max rate is scaled by the number of nodes in the cluster (same as for HHOM - see CASSANDRA-5272).
@@ -191,7 +191,7 @@ public class BatchlogManager implements BatchlogManagerMBean
 
         cleanup();
 
-        logger.debug("Finished replayAllFailedBatches");
+        logger.trace("Finished replayAllFailedBatches");
     }
 
     private void deleteBatch(UUID id)
@@ -274,7 +274,7 @@ public class BatchlogManager implements BatchlogManagerMBean
 
         public int replay(RateLimiter rateLimiter) throws IOException
         {
-            logger.debug("Replaying batch {}", id);
+            logger.trace("Replaying batch {}", id);
 
             List<Mutation> mutations = replayingMutations();
 
@@ -303,8 +303,8 @@ public class BatchlogManager implements BatchlogManagerMBean
                 }
                 catch (WriteTimeoutException|WriteFailureException e)
                 {
-                    logger.debug("Failed replaying a batched mutation to a 
node, will write a hint");
-                    logger.debug("Failure was : {}", e.getMessage());
+                    logger.trace("Failed replaying a batched mutation to a 
node, will write a hint");
+                    logger.trace("Failure was : {}", e.getMessage());
                     // writing hints for the rest to hints, starting from i
                     writeHintsForUndeliveredEndpoints(i);
                     return;

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a849efe/src/java/org/apache/cassandra/db/ColumnFamilyStore.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/ColumnFamilyStore.java 
b/src/java/org/apache/cassandra/db/ColumnFamilyStore.java
index a8a8910..4b418b4 100644
--- a/src/java/org/apache/cassandra/db/ColumnFamilyStore.java
+++ b/src/java/org/apache/cassandra/db/ColumnFamilyStore.java
@@ -217,7 +217,7 @@ public class ColumnFamilyStore implements 
ColumnFamilyStoreMBean
         int period = metadata.getMemtableFlushPeriod();
         if (period > 0)
         {
-            logger.debug("scheduling flush in {} ms", period);
+            logger.trace("scheduling flush in {} ms", period);
             WrappedRunnable runnable = new WrappedRunnable()
             {
                 protected void runMayThrow() throws Exception
@@ -422,7 +422,7 @@ public class ColumnFamilyStore implements 
ColumnFamilyStoreMBean
             {
                 throw new RuntimeException(e);
             }
-            logger.debug("retryPolicy for {} is {}", name, 
this.metadata.getSpeculativeRetry());
+            logger.trace("retryPolicy for {} is {}", name, 
this.metadata.getSpeculativeRetry());
             latencyCalculator = 
ScheduledExecutors.optionalTasks.scheduleWithFixedDelay(new Runnable()
             {
                 public void run()
@@ -570,7 +570,7 @@ public class ColumnFamilyStore implements 
ColumnFamilyStoreMBean
             }
         }
 
-        logger.debug("Removing compacted SSTable files from {} (see 
http://wiki.apache.org/cassandra/MemtableSSTable)", metadata.cfName);
+        logger.trace("Removing compacted SSTable files from {} (see 
http://wiki.apache.org/cassandra/MemtableSSTable)", metadata.cfName);
 
         for (Map.Entry<Descriptor,Set<Component>> sstableFiles : 
directories.sstableLister().list().entrySet())
         {
@@ -649,7 +649,7 @@ public class ColumnFamilyStore implements 
ColumnFamilyStoreMBean
         {
            HashSet<Integer> missingGenerations = new HashSet<>(unfinishedGenerations);
            missingGenerations.removeAll(allGenerations);
-            logger.debug("Unfinished compactions of {}.{} reference missing sstables of generations {}",
+            logger.trace("Unfinished compactions of {}.{} reference missing sstables of generations {}",
                          metadata.ksName, metadata.cfName, missingGenerations);
         }
 
@@ -682,7 +682,7 @@ public class ColumnFamilyStore implements 
ColumnFamilyStoreMBean
                // any of the ancestors would work, so we'll just lookup the compaction task ID with the first one
                UUID compactionTaskID = unfinishedCompactions.get(ancestors.iterator().next());
                assert compactionTaskID != null;
-                logger.debug("Going to delete unfinished compaction product {}", desc);
+                logger.trace("Going to delete unfinished compaction product {}", desc);
                 SSTable.delete(desc, sstableFiles.getValue());
                 SystemKeyspace.finishCompaction(compactionTaskID);
             }
@@ -699,7 +699,7 @@ public class ColumnFamilyStore implements 
ColumnFamilyStoreMBean
             if (completedAncestors.contains(desc.generation))
             {
                // if any of the ancestors were participating in a compaction, finish that compaction
-                logger.debug("Going to delete leftover compaction ancestor {}", desc);
+                logger.trace("Going to delete leftover compaction ancestor {}", desc);
                SSTable.delete(desc, sstableFiles.getValue());
                UUID compactionTaskID = unfinishedCompactions.get(desc.generation);
                 if (compactionTaskID != null)
@@ -916,7 +916,7 @@ public class ColumnFamilyStore implements 
ColumnFamilyStoreMBean
             }
         }
 
-        logger.info("Enqueuing flush of {}: {}", name, String.format("%d 
(%.0f%%) on-heap, %d (%.0f%%) off-heap",
+        logger.debug("Enqueuing flush of {}: {}", name, String.format("%d 
(%.0f%%) on-heap, %d (%.0f%%) off-heap",
                                                                      
onHeapTotal, onHeapRatio * 100, offHeapTotal, offHeapRatio * 100));
     }
 
@@ -955,7 +955,7 @@ public class ColumnFamilyStore implements 
ColumnFamilyStoreMBean
                 {
                     public void run()
                     {
-                        logger.debug("forceFlush requested but everything is 
clean in {}", name);
+                        logger.trace("forceFlush requested but everything is 
clean in {}", name);
                     }
                 }, null);
                 postFlushExecutor.execute(task);
@@ -1208,7 +1208,7 @@ public class ColumnFamilyStore implements 
ColumnFamilyStoreMBean
                float flushingOffHeap = Memtable.MEMORY_POOL.offHeap.reclaimingRatio();
                float thisOnHeap = largest.getAllocator().onHeap().ownershipRatio();
                float thisOffHeap = largest.getAllocator().onHeap().ownershipRatio();
-                logger.info("Flushing largest {} to free up room. Used total: {}, live: {}, flushing: {}, this: {}",
+                logger.debug("Flushing largest {} to free up room. Used total: {}, live: {}, flushing: {}, this: {}",
                            largest.cfs, ratio(usedOnHeap, usedOffHeap), ratio(liveOnHeap, liveOffHeap),
                            ratio(flushingOnHeap, flushingOffHeap), ratio(thisOnHeap, thisOffHeap));
                 largest.cfs.switchMemtableIfCurrent(largest);
@@ -1343,7 +1343,7 @@ public class ColumnFamilyStore implements 
ColumnFamilyStoreMBean
      */
     public Collection<SSTableReader> getOverlappingSSTables(Iterable<SSTableReader> sstables)
     {
-        logger.debug("Checking for sstables overlapping {}", sstables);
+        logger.trace("Checking for sstables overlapping {}", sstables);
 
        // a normal compaction won't ever have an empty sstables list, but we create a skeleton
         // compaction controller for streaming, and that passes an empty list.
@@ -1972,7 +1972,7 @@ public class ColumnFamilyStore implements 
ColumnFamilyStoreMBean
                     }
                 }
 
-                logger.debug("ViewFilter for {}/{} sstables", sstables.size(), 
getSSTables().size());
+                logger.trace("ViewFilter for {}/{} sstables", sstables.size(), 
getSSTables().size());
                 return ImmutableList.copyOf(sstables);
             }
         };
@@ -2328,8 +2328,8 @@ public class ColumnFamilyStore implements 
ColumnFamilyStoreMBean
                    ssTable.createLinks(snapshotDirectory.getPath()); // hard links
                    filesJSONArr.add(ssTable.descriptor.relativeFilenameFor(Component.DATA));
 
-                    if (logger.isDebugEnabled())
-                        logger.debug("Snapshot for {} keyspace data file {} created in {}", keyspace, ssTable.getFilename(), snapshotDirectory);
+                    if (logger.isTraceEnabled())
+                        logger.trace("Snapshot for {} keyspace data file {} created in {}", keyspace, ssTable.getFilename(), snapshotDirectory);
                     snapshottedSSTables.add(ssTable);
                 }
 
@@ -2373,7 +2373,7 @@ public class ColumnFamilyStore implements 
ColumnFamilyStoreMBean
                 ephemeralSnapshotMarker.getParentFile().mkdirs();
 
             Files.createFile(ephemeralSnapshotMarker.toPath());
-            logger.debug("Created ephemeral snapshot marker file on {}.", 
ephemeralSnapshotMarker.getAbsolutePath());
+            logger.trace("Created ephemeral snapshot marker file on {}.", 
ephemeralSnapshotMarker.getAbsolutePath());
         }
         catch (IOException e)
         {
@@ -2388,7 +2388,7 @@ public class ColumnFamilyStore implements 
ColumnFamilyStoreMBean
     {
         for (String ephemeralSnapshot : directories.listEphemeralSnapshots())
         {
-            logger.debug("Clearing ephemeral snapshot {} leftover from 
previous session.", ephemeralSnapshot);
+            logger.trace("Clearing ephemeral snapshot {} leftover from 
previous session.", ephemeralSnapshot);
             Directories.clearSnapshot(ephemeralSnapshot, 
directories.getCFDirectories());
         }
     }
@@ -2409,17 +2409,17 @@ public class ColumnFamilyStore implements 
ColumnFamilyStoreMBean
                SSTableReader sstable = active.get(entries.getKey().generation);
                if (sstable == null || !refs.tryRef(sstable))
                {
-                    if (logger.isDebugEnabled())
-                        logger.debug("using snapshot sstable {}", entries.getKey());
+                    if (logger.isTraceEnabled())
+                        logger.trace("using snapshot sstable {}", entries.getKey());
                    // open without tracking hotness
                    sstable = SSTableReader.open(entries.getKey(), entries.getValue(), metadata, partitioner, true, false);
                    refs.tryRef(sstable);
                    // release the self ref as we never add the snapshot sstable to DataTracker where it is otherwise released
                     sstable.selfRef().release();
                 }
-                else if (logger.isDebugEnabled())
+                else if (logger.isTraceEnabled())
                 {
-                    logger.debug("using active sstable {}", entries.getKey());
+                    logger.trace("using active sstable {}", entries.getKey());
                 }
             }
         }
@@ -2634,7 +2634,7 @@ public class ColumnFamilyStore implements 
ColumnFamilyStoreMBean
        // beginning if we restart before they [the CL segments] are discarded for
         // normal reasons post-truncate.  To prevent this, we store truncation
         // position in the System keyspace.
-        logger.debug("truncating {}", name);
+        logger.trace("truncating {}", name);
 
        if (keyspace.getMetadata().durableWrites || DatabaseDescriptor.isAutoSnapshot())
         {
@@ -2660,7 +2660,7 @@ public class ColumnFamilyStore implements 
ColumnFamilyStoreMBean
         {
             public void run()
             {
-                logger.debug("Discarding sstable data for truncated CF + 
indexes");
+                logger.trace("Discarding sstable data for truncated CF + 
indexes");
 
                 final long truncatedAt = System.currentTimeMillis();
                 data.notifyTruncated(truncatedAt);
@@ -2674,13 +2674,13 @@ public class ColumnFamilyStore implements 
ColumnFamilyStoreMBean
                     index.truncateBlocking(truncatedAt);
 
                SystemKeyspace.saveTruncationRecord(ColumnFamilyStore.this, truncatedAt, replayAfter);
-                logger.debug("cleaning out row cache");
+                logger.trace("cleaning out row cache");
                 invalidateCaches();
             }
         };
 
         runWithCompactionsDisabled(Executors.callable(truncateRunnable), true);
-        logger.debug("truncate complete");
+        logger.trace("truncate complete");
     }
 
     public <V> V runWithCompactionsDisabled(Callable<V> callable, boolean interruptValidation)
@@ -2689,7 +2689,7 @@ public class ColumnFamilyStore implements 
ColumnFamilyStoreMBean
         // and so we only run one major compaction at a time
         synchronized (this)
         {
-            logger.debug("Cancelling in-progress compactions for {}", 
metadata.cfName);
+            logger.trace("Cancelling in-progress compactions for {}", 
metadata.cfName);
 
             Iterable<ColumnFamilyStore> selfWithIndexes = concatWithIndexes();
             for (ColumnFamilyStore cfs : selfWithIndexes)
@@ -2709,7 +2709,7 @@ public class ColumnFamilyStore implements 
ColumnFamilyStoreMBean
                         return null;
                     }
                 }
-                logger.debug("Compactions successfully cancelled");
+                logger.trace("Compactions successfully cancelled");
 
                 // run our task
                 try

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a849efe/src/java/org/apache/cassandra/db/ConsistencyLevel.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/ConsistencyLevel.java 
b/src/java/org/apache/cassandra/db/ConsistencyLevel.java
index d49d66a..85ec0f3 100644
--- a/src/java/org/apache/cassandra/db/ConsistencyLevel.java
+++ b/src/java/org/apache/cassandra/db/ConsistencyLevel.java
@@ -260,7 +260,7 @@ public enum ConsistencyLevel
                 int localLive = countLocalEndpoints(liveEndpoints);
                 if (localLive < blockFor)
                 {
-                    if (logger.isDebugEnabled())
+                    if (logger.isTraceEnabled())
                     {
                         StringBuilder builder = new StringBuilder("Local 
replicas [");
                         for (InetAddress endpoint : liveEndpoints)
@@ -269,7 +269,7 @@ public enum ConsistencyLevel
                                 builder.append(endpoint).append(",");
                         }
                         builder.append("] are insufficient to satisfy 
LOCAL_QUORUM requirement of ").append(blockFor).append(" live nodes in 
'").append(DatabaseDescriptor.getLocalDataCenter()).append("'");
-                        logger.debug(builder.toString());
+                        logger.trace(builder.toString());
                     }
                     throw new UnavailableException(this, blockFor, localLive);
                 }
@@ -291,7 +291,7 @@ public enum ConsistencyLevel
                 int live = Iterables.size(liveEndpoints);
                 if (live < blockFor)
                 {
-                    logger.debug("Live nodes {} do not satisfy 
ConsistencyLevel ({} required)", Iterables.toString(liveEndpoints), blockFor);
+                    logger.trace("Live nodes {} do not satisfy 
ConsistencyLevel ({} required)", Iterables.toString(liveEndpoints), blockFor);
                     throw new UnavailableException(this, blockFor, live);
                 }
                 break;

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a849efe/src/java/org/apache/cassandra/db/CounterMutationVerbHandler.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/CounterMutationVerbHandler.java 
b/src/java/org/apache/cassandra/db/CounterMutationVerbHandler.java
index d9ee38a..4dd8ac3 100644
--- a/src/java/org/apache/cassandra/db/CounterMutationVerbHandler.java
+++ b/src/java/org/apache/cassandra/db/CounterMutationVerbHandler.java
@@ -35,7 +35,7 @@ public class CounterMutationVerbHandler implements 
IVerbHandler<CounterMutation>
     public void doVerb(final MessageIn<CounterMutation> message, final int id)
     {
         final CounterMutation cm = message.payload;
-        logger.debug("Applying forwarded {}", cm);
+        logger.trace("Applying forwarded {}", cm);
 
        String localDataCenter = DatabaseDescriptor.getEndpointSnitch().getDatacenter(FBUtilities.getBroadcastAddress());
         // We should not wait for the result of the write in this thread,

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a849efe/src/java/org/apache/cassandra/db/DefinitionsUpdateVerbHandler.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/DefinitionsUpdateVerbHandler.java 
b/src/java/org/apache/cassandra/db/DefinitionsUpdateVerbHandler.java
index d5ede03..51d15b4 100644
--- a/src/java/org/apache/cassandra/db/DefinitionsUpdateVerbHandler.java
+++ b/src/java/org/apache/cassandra/db/DefinitionsUpdateVerbHandler.java
@@ -41,7 +41,7 @@ public class DefinitionsUpdateVerbHandler implements 
IVerbHandler<Collection<Mut
 
     public void doVerb(final MessageIn<Collection<Mutation>> message, int id)
     {
-        logger.debug("Received schema mutation push from {}", message.from);
+        logger.trace("Received schema mutation push from {}", message.from);
 
         StageManager.getStage(Stage.MIGRATION).submit(new WrappedRunnable()
         {

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a849efe/src/java/org/apache/cassandra/db/Directories.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/Directories.java 
b/src/java/org/apache/cassandra/db/Directories.java
index 0011baf..066b759 100644
--- a/src/java/org/apache/cassandra/db/Directories.java
+++ b/src/java/org/apache/cassandra/db/Directories.java
@@ -258,7 +258,7 @@ public class Directories
                 for (File indexFile : indexFiles)
                 {
                     File destFile = new File(dataPath, indexFile.getName());
-                    logger.debug("Moving index file {} to {}", indexFile, 
destFile);
+                    logger.trace("Moving index file {} to {}", indexFile, 
destFile);
                     FileUtils.renameWithConfirm(indexFile, destFile);
                 }
             }
@@ -329,14 +329,14 @@ public class Directories
         {
            if (BlacklistedDirectories.isUnwritable(getLocationForDisk(dataDir)))
            {
-                logger.debug("removing blacklisted candidate {}", dataDir.location);
+                logger.trace("removing blacklisted candidate {}", dataDir.location);
                 continue;
             }
            DataDirectoryCandidate candidate = new DataDirectoryCandidate(dataDir);
            // exclude directory if its total writeSize does not fit to data directory
            if (candidate.availableSpace < writeSize)
            {
-                logger.debug("removing candidate {}, usable={}, requested={}", candidate.dataDirectory.location, candidate.availableSpace, writeSize);
+                logger.trace("removing candidate {}, usable={}, requested={}", candidate.dataDirectory.location, candidate.availableSpace, writeSize);
                 tooBig = true;
                 continue;
             }
@@ -728,7 +728,7 @@ public class Directories
             File snapshotDir = new File(dir, join(SNAPSHOT_SUBDIR, tag));
             if (snapshotDir.exists())
             {
-                logger.debug("Removing snapshot directory {}", snapshotDir);
+                logger.trace("Removing snapshot directory {}", snapshotDir);
                 try
                 {
                     FileUtils.deleteRecursive(snapshotDir);

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a849efe/src/java/org/apache/cassandra/db/HintedHandOffManager.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/HintedHandOffManager.java 
b/src/java/org/apache/cassandra/db/HintedHandOffManager.java
index dae85b7..179c611 100644
--- a/src/java/org/apache/cassandra/db/HintedHandOffManager.java
+++ b/src/java/org/apache/cassandra/db/HintedHandOffManager.java
@@ -175,7 +175,7 @@ public class HintedHandOffManager implements 
HintedHandOffManagerMBean
         {
             throw new RuntimeException(e);
         }
-        logger.debug("Created HHOM instance, registered MBean.");
+        logger.trace("Created HHOM instance, registered MBean.");
 
         Runnable runnable = new Runnable()
         {
@@ -317,7 +317,7 @@ public class HintedHandOffManager implements 
HintedHandOffManagerMBean
         }
         if (gossiper.getEndpointStateForEndpoint(endpoint) == null)
             throw new TimeoutException("Node " + endpoint + " vanished while 
waiting for agreement");
-        logger.debug("schema for {} matches local schema", endpoint);
+        logger.trace("schema for {} matches local schema", endpoint);
         return waited;
     }
 
@@ -329,11 +329,11 @@ public class HintedHandOffManager implements 
HintedHandOffManagerMBean
         // check if hints delivery has been paused
         if (hintedHandOffPaused)
         {
-            logger.debug("Hints delivery process is paused, aborting");
+            logger.trace("Hints delivery process is paused, aborting");
             return;
         }
 
-        logger.debug("Checking remote({}) schema before delivering hints", 
endpoint);
+        logger.trace("Checking remote({}) schema before delivering hints", 
endpoint);
         try
         {
             waitForSchemaAgreement(endpoint);
@@ -345,7 +345,7 @@ public class HintedHandOffManager implements 
HintedHandOffManagerMBean
 
         if (!FailureDetector.instance.isAlive(endpoint))
         {
-            logger.debug("Endpoint {} died before hint delivery, aborting", 
endpoint);
+            logger.trace("Endpoint {} died before hint delivery, aborting", 
endpoint);
             return;
         }
 
@@ -370,7 +370,7 @@ public class HintedHandOffManager implements 
HintedHandOffManagerMBean
         Composite startColumn = Composites.EMPTY;
 
         int pageSize = calculatePageSize();
-        logger.debug("Using pageSize of {}", pageSize);
+        logger.trace("Using pageSize of {}", pageSize);
 
        // rate limit is in bytes per second. Uses Double.MAX_VALUE if disabled (set to 0 in cassandra.yaml).
        // max rate is scaled by the number of nodes in the cluster (CASSANDRA-5272).
@@ -411,7 +411,7 @@ public class HintedHandOffManager implements 
HintedHandOffManagerMBean
                 // check if hints delivery has been paused during the process
                 if (hintedHandOffPaused)
                 {
-                    logger.debug("Hints delivery process is paused, aborting");
+                    logger.trace("Hints delivery process is paused, aborting");
                     break delivery;
                 }
 
@@ -434,7 +434,7 @@ public class HintedHandOffManager implements 
HintedHandOffManagerMBean
                 }
                 catch (UnknownColumnFamilyException e)
                 {
-                    logger.debug("Skipping delivery of hint for deleted 
table", e);
+                    logger.trace("Skipping delivery of hint for deleted 
table", e);
                     deleteHint(hostIdBytes, hint.name(), hint.timestamp());
                     continue;
                 }
@@ -447,7 +447,7 @@ public class HintedHandOffManager implements 
HintedHandOffManagerMBean
                 {
                    if (hint.timestamp() <= SystemKeyspace.getTruncatedAt(cfId))
                    {
-                        logger.debug("Skipping delivery of hint for truncated table {}", cfId);
+                        logger.trace("Skipping delivery of hint for truncated table {}", cfId);
                         mutation = mutation.without(cfId);
                     }
                 }
@@ -513,7 +513,7 @@ public class HintedHandOffManager implements 
HintedHandOffManagerMBean
      */
     private void scheduleAllDeliveries()
     {
-        logger.debug("Started scheduleAllDeliveries");
+        logger.trace("Started scheduleAllDeliveries");
 
        // Force a major compaction to get rid of the tombstones and expired hints. Do it once, before we schedule any
        // individual replay, to avoid N - 1 redundant individual compactions (when N is the number of nodes with hints
@@ -534,7 +534,7 @@ public class HintedHandOffManager implements 
HintedHandOffManagerMBean
                 scheduleHintDelivery(target, false);
         }
 
-        logger.debug("Finished scheduleAllDeliveries");
+        logger.trace("Finished scheduleAllDeliveries");
     }
 
     /*
@@ -548,7 +548,7 @@ public class HintedHandOffManager implements 
HintedHandOffManagerMBean
         if (!queuedDeliveries.add(to))
             return;
 
-        logger.debug("Scheduling delivery of Hints to {}", to);
+        logger.trace("Scheduling delivery of Hints to {}", to);
 
         hintDeliveryExecutor.execute(new Runnable()
         {

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a849efe/src/java/org/apache/cassandra/db/Keyspace.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/Keyspace.java 
b/src/java/org/apache/cassandra/db/Keyspace.java
index feb3c5e..92a0950 100644
--- a/src/java/org/apache/cassandra/db/Keyspace.java
+++ b/src/java/org/apache/cassandra/db/Keyspace.java
@@ -266,7 +266,7 @@ public class Keyspace
         this.metric = new KeyspaceMetrics(this);
         for (CFMetaData cfm : new ArrayList<>(metadata.cfMetaData().values()))
         {
-            logger.debug("Initializing {}.{}", getName(), cfm.cfName);
+            logger.trace("Initializing {}.{}", getName(), cfm.cfName);
             initCf(cfm.cfId, cfm.cfName, loadSSTables);
         }
     }
@@ -420,8 +420,8 @@ public class Keyspace
      */
     public static void indexRow(DecoratedKey key, ColumnFamilyStore cfs, Set<String> idxNames)
     {
-        if (logger.isDebugEnabled())
-            logger.debug("Indexing row {} ", cfs.metadata.getKeyValidator().getString(key.getKey()));
+        if (logger.isTraceEnabled())
+            logger.trace("Indexing row {} ", cfs.metadata.getKeyValidator().getString(key.getKey()));
 
         try (OpOrder.Group opGroup = cfs.keyspace.writeOrder.start())
         {

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a849efe/src/java/org/apache/cassandra/db/Memtable.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/Memtable.java 
b/src/java/org/apache/cassandra/db/Memtable.java
index 1ce2b9f..e96a71e 100644
--- a/src/java/org/apache/cassandra/db/Memtable.java
+++ b/src/java/org/apache/cassandra/db/Memtable.java
@@ -360,13 +360,13 @@ public class Memtable implements Comparable<Memtable>
 
        private SSTableReader writeSortedContents(ReplayPosition context, File sstableDirectory)
         {
-            logger.info("Writing {}", Memtable.this.toString());
+            logger.debug("Writing {}", Memtable.this.toString());
 
             SSTableReader ssTable;
             // errors when creating the writer that may leave empty temp files.
            try (SSTableWriter writer = createFlushWriter(cfs.getTempSSTablePath(sstableDirectory)))
             {
-                boolean trackContention = logger.isDebugEnabled();
+                boolean trackContention = logger.isTraceEnabled();
                 int heavilyContendedRowCount = 0;
                 // (we can't clear out the map as-we-go to free up memory,
                //  since the memtable is being used for queries in the "pending flush" category)
@@ -394,7 +394,7 @@ public class Memtable implements Comparable<Memtable>
 
                 if (writer.getFilePointer() > 0)
                 {
-                    logger.info(String.format("Completed flushing %s (%s) for 
commitlog position %s",
+                    logger.debug(String.format("Completed flushing %s (%s) for 
commitlog position %s",
                                               writer.getFilename(),
                                               
FBUtilities.prettyPrintMemory(writer.getOnDiskFilePointer()),
                                               context));
@@ -404,14 +404,14 @@ public class Memtable implements Comparable<Memtable>
                 }
                 else
                 {
-                    logger.info("Completed flushing {}; nothing needed to be 
retained.  Commitlog position was {}",
+                    logger.debug("Completed flushing {}; nothing needed to be 
retained.  Commitlog position was {}",
                                 writer.getFilename(), context);
                     writer.abort();
                     ssTable = null;
                 }
 
                 if (heavilyContendedRowCount > 0)
-                    logger.debug(String.format("High update contention in 
%d/%d partitions of %s ", heavilyContendedRowCount, rows.size(), 
Memtable.this.toString()));
+                    logger.trace(String.format("High update contention in 
%d/%d partitions of %s ", heavilyContendedRowCount, rows.size(), 
Memtable.this.toString()));
 
                 return ssTable;
             }
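
In Memtable, the flush-lifecycle messages drop from info to debug (routine events, but useful in a production debug log), and trackContention is keyed off the logger level: the level doubles as a switch for optional bookkeeping, so the counting cost is paid only when the result will actually be visible. A small sketch of that idiom, with invented names:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

// Illustrative sketch of the trackContention idiom above; not Cassandra code.
public class FlushExample
{
    private static final Logger logger = LoggerFactory.getLogger(FlushExample.class);

    public void flush(Iterable<Boolean> partitionsContended)
    {
        // Only do the extra bookkeeping if someone can see the result.
        boolean trackContention = logger.isTraceEnabled();
        int heavilyContendedRowCount = 0;

        for (boolean contended : partitionsContended)
            if (trackContention && contended)
                heavilyContendedRowCount++;

        if (heavilyContendedRowCount > 0)
            logger.trace("High update contention in {} partitions", heavilyContendedRowCount);
    }
}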

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a849efe/src/java/org/apache/cassandra/db/MigrationRequestVerbHandler.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/MigrationRequestVerbHandler.java 
b/src/java/org/apache/cassandra/db/MigrationRequestVerbHandler.java
index 79753c1..ab934c6 100644
--- a/src/java/org/apache/cassandra/db/MigrationRequestVerbHandler.java
+++ b/src/java/org/apache/cassandra/db/MigrationRequestVerbHandler.java
@@ -39,7 +39,7 @@ public class MigrationRequestVerbHandler implements 
IVerbHandler
 
     public void doVerb(MessageIn message, int id)
     {
-        logger.debug("Received migration request from {}.", message.from);
+        logger.trace("Received migration request from {}.", message.from);
         MessageOut<Collection<Mutation>> response = new 
MessageOut<>(MessagingService.Verb.INTERNAL_RESPONSE,
                                                                      
LegacySchemaTables.convertSchemaToMutations(),
                                                                      
MigrationManager.MigrationsSerializer.instance);

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a849efe/src/java/org/apache/cassandra/db/SchemaCheckVerbHandler.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/SchemaCheckVerbHandler.java 
b/src/java/org/apache/cassandra/db/SchemaCheckVerbHandler.java
index 1a1f7a9..4270a24 100644
--- a/src/java/org/apache/cassandra/db/SchemaCheckVerbHandler.java
+++ b/src/java/org/apache/cassandra/db/SchemaCheckVerbHandler.java
@@ -35,7 +35,7 @@ public class SchemaCheckVerbHandler implements IVerbHandler
 
     public void doVerb(MessageIn message, int id)
     {
-        logger.debug("Received schema check request.");
+        logger.trace("Received schema check request.");
         MessageOut<UUID> response = new 
MessageOut<UUID>(MessagingService.Verb.INTERNAL_RESPONSE, 
Schema.instance.getVersion(), UUIDSerializer.serializer);
         MessagingService.instance().sendReply(response, id, message.from);
     }

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a849efe/src/java/org/apache/cassandra/db/SizeEstimatesRecorder.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/SizeEstimatesRecorder.java 
b/src/java/org/apache/cassandra/db/SizeEstimatesRecorder.java
index f054315..8bf1ef3 100644
--- a/src/java/org/apache/cassandra/db/SizeEstimatesRecorder.java
+++ b/src/java/org/apache/cassandra/db/SizeEstimatesRecorder.java
@@ -57,11 +57,11 @@ public class SizeEstimatesRecorder extends 
MigrationListener implements Runnable
     {
         if (StorageService.instance.isStarting())
         {
-            logger.debug("Node has not yet joined; not recording size 
estimates");
+            logger.trace("Node has not yet joined; not recording size 
estimates");
             return;
         }
 
-        logger.debug("Recording size estimates");
+        logger.trace("Recording size estimates");
 
         // find primary token ranges for the local node.
         Collection<Token> localTokens = 
StorageService.instance.getLocalTokens();
@@ -74,7 +74,7 @@ public class SizeEstimatesRecorder extends MigrationListener 
implements Runnable
                 long start = System.nanoTime();
                 recordSizeEstimates(table, localRanges);
                 long passed = System.nanoTime() - start;
-                logger.debug("Spent {} milliseconds on estimating {}.{} size",
+                logger.trace("Spent {} milliseconds on estimating {}.{} size",
                              TimeUnit.NANOSECONDS.toMillis(passed),
                              table.metadata.ksName,
                              table.metadata.cfName);

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a849efe/src/java/org/apache/cassandra/db/SliceFromReadCommand.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/SliceFromReadCommand.java 
b/src/java/org/apache/cassandra/db/SliceFromReadCommand.java
index 461a3a1..edace9d 100644
--- a/src/java/org/apache/cassandra/db/SliceFromReadCommand.java
+++ b/src/java/org/apache/cassandra/db/SliceFromReadCommand.java
@@ -65,7 +65,7 @@ public class SliceFromReadCommand extends ReadCommand
         // reads in order to guarantee that the static columns are fetched.  
See CASSANDRA-8502 for more details.
         if (filter.reversed && filter.hasStaticSlice(cfm))
         {
-            logger.debug("Splitting reversed slice with static columns into 
two reads");
+            logger.trace("Splitting reversed slice with static columns into 
two reads");
             Pair<SliceQueryFilter, SliceQueryFilter> newFilters = 
filter.splitOutStaticSlice(cfm);
 
             Row normalResults =  keyspace.getRow(new QueryFilter(dk, cfName, 
newFilters.right, timestamp));

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a849efe/src/java/org/apache/cassandra/db/SystemKeyspace.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/SystemKeyspace.java 
b/src/java/org/apache/cassandra/db/SystemKeyspace.java
index 18af90f..308edcd 100644
--- a/src/java/org/apache/cassandra/db/SystemKeyspace.java
+++ b/src/java/org/apache/cassandra/db/SystemKeyspace.java
@@ -1110,7 +1110,7 @@ public final class SystemKeyspace
             {
                 if (dataDirectory.getName().equals("Versions") && 
dataDirectory.listFiles().length > 0)
                 {
-                    logger.debug("Found unreadable versions info in pre 1.2 
system.Versions table");
+                    logger.trace("Found unreadable versions info in pre 1.2 
system.Versions table");
                     return UNREADABLE_VERSION.toString();
                 }
             }

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a849efe/src/java/org/apache/cassandra/db/commitlog/CommitLog.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/commitlog/CommitLog.java 
b/src/java/org/apache/cassandra/db/commitlog/CommitLog.java
index 0e08efe..a60c135 100644
--- a/src/java/org/apache/cassandra/db/commitlog/CommitLog.java
+++ b/src/java/org/apache/cassandra/db/commitlog/CommitLog.java
@@ -303,7 +303,7 @@ public class CommitLog implements CommitLogMBean
      */
     public void discardCompletedSegments(final UUID cfId, final ReplayPosition 
context)
     {
-        logger.debug("discard completed log segments for {}, table {}", 
context, cfId);
+        logger.trace("discard completed log segments for {}, table {}", 
context, cfId);
 
         // Go thru the active segment files, which are ordered oldest to 
newest, marking the
         // flushed CF as clean, until we reach the segment file containing the 
ReplayPosition passed
@@ -316,12 +316,12 @@ public class CommitLog implements CommitLogMBean
 
             if (segment.isUnused())
             {
-                logger.debug("Commit log segment {} is unused", segment);
+                logger.trace("Commit log segment {} is unused", segment);
                 allocator.recycleSegment(segment);
             }
             else
             {
-                logger.debug("Not safe to delete{} commit log segment {}; 
dirty is {}",
+                logger.trace("Not safe to delete{} commit log segment {}; 
dirty is {}",
                         (iter.hasNext() ? "" : " active"), segment, 
segment.dirtyString());
             }
 

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a849efe/src/java/org/apache/cassandra/db/commitlog/CommitLogArchiver.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/commitlog/CommitLogArchiver.java 
b/src/java/org/apache/cassandra/db/commitlog/CommitLogArchiver.java
index 4c615e0..b734573 100644
--- a/src/java/org/apache/cassandra/db/commitlog/CommitLogArchiver.java
+++ b/src/java/org/apache/cassandra/db/commitlog/CommitLogArchiver.java
@@ -83,7 +83,7 @@ public class CommitLogArchiver
         {
             if (stream == null)
             {
-                logger.debug("No commitlog_archiving properties found; archive 
+ pitr will be disabled");
+                logger.trace("No commitlog_archiving properties found; archive 
+ pitr will be disabled");
                 return disabled();
             }
             else
@@ -237,7 +237,7 @@ public class CommitLogArchiver
                 File toFile = new 
File(DatabaseDescriptor.getCommitLogLocation(), descriptor.fileName());
                 if (toFile.exists())
                 {
-                    logger.debug("Skipping restore of archive {} as the 
segment already exists in the restore location {}",
+                    logger.trace("Skipping restore of archive {} as the 
segment already exists in the restore location {}",
                                  fromFile.getPath(), toFile.getPath());
                     continue;
                 }

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a849efe/src/java/org/apache/cassandra/db/commitlog/CommitLogReplayer.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/commitlog/CommitLogReplayer.java 
b/src/java/org/apache/cassandra/db/commitlog/CommitLogReplayer.java
index 389b111..cb02a8c 100644
--- a/src/java/org/apache/cassandra/db/commitlog/CommitLogReplayer.java
+++ b/src/java/org/apache/cassandra/db/commitlog/CommitLogReplayer.java
@@ -136,7 +136,7 @@ public class CommitLogReplayer
             cfPositions.put(cfs.metadata.cfId, rp);
         }
         ReplayPosition globalPosition = 
replayPositionOrdering.min(cfPositions.values());
-        logger.debug("Global replay position is {} from columnfamilies {}", 
globalPosition, FBUtilities.toString(cfPositions));
+        logger.trace("Global replay position is {} from columnfamilies {}", 
globalPosition, FBUtilities.toString(cfPositions));
         return new CommitLogReplayer(commitLog, globalPosition, cfPositions, 
replayFilter);
     }
 
@@ -154,7 +154,7 @@ public class CommitLogReplayer
 
         // wait for all the writes to finish on the mutation stage
         FBUtilities.waitOnFutures(futures);
-        logger.debug("Finished waiting on mutations from recovery");
+        logger.trace("Finished waiting on mutations from recovery");
 
         // flush replayed keyspaces
         futures.clear();
@@ -333,7 +333,7 @@ public class CommitLogReplayer
             {
                 int replayPos = replayEnd + CommitLogSegment.SYNC_MARKER_SIZE;
 
-                if (logger.isDebugEnabled())
+                if (logger.isTraceEnabled())
                     logger.trace("Replaying {} between {} and {}", file, 
reader.getFilePointer(), end);
                 if (compressor != null)
                 {
@@ -361,7 +361,7 @@ public class CommitLogReplayer
                     try
                     {
                         int compressedLength = end - start;
-                        if (logger.isDebugEnabled())
+                        if (logger.isTraceEnabled())
                             logger.trace("Decompressing {} between replay 
positions {} and {}",
                                          file,
                                          replayPos,
@@ -392,13 +392,13 @@ public class CommitLogReplayer
         finally
         {
             FileUtils.closeQuietly(reader);
-            logger.info("Finished reading {}", file);
+            logger.debug("Finished reading {}", file);
         }
     }
 
     public boolean logAndCheckIfShouldSkip(File file, CommitLogDescriptor desc)
     {
-        logger.info("Replaying {} (CL version {}, messaging version {}, 
compression {})",
+        logger.debug("Replaying {} (CL version {}, messaging version {}, 
compression {})",
                     file.getPath(),
                     desc.version,
                     desc.getMessagingVersion(),
@@ -406,7 +406,7 @@ public class CommitLogReplayer
 
         if (globalPosition.segment > desc.id)
         {
-            logger.debug("skipping replay of fully-flushed {}", file);
+            logger.trace("skipping replay of fully-flushed {}", file);
             return true;
         }
         return false;
@@ -423,7 +423,7 @@ public class CommitLogReplayer
         while (reader.getFilePointer() < end && !reader.isEOF())
         {
             long mutationStart = reader.getFilePointer();
-            if (logger.isDebugEnabled())
+            if (logger.isTraceEnabled())
                 logger.trace("Reading mutation at {}", mutationStart);
 
             long claimedCRC32;
@@ -434,7 +434,7 @@ public class CommitLogReplayer
                 serializedSize = reader.readInt();
                 if (serializedSize == LEGACY_END_OF_SEGMENT_MARKER)
                 {
-                    logger.debug("Encountered end of segment marker at {}", 
reader.getFilePointer());
+                    logger.trace("Encountered end of segment marker at {}", 
reader.getFilePointer());
                     return false;
                 }
 
@@ -551,8 +551,8 @@ public class CommitLogReplayer
             return;
         }
 
-        if (logger.isDebugEnabled())
-            logger.debug("replaying mutation for {}.{}: {}", 
mutation.getKeyspaceName(), ByteBufferUtil.bytesToHex(mutation.key()), "{" + 
StringUtils.join(mutation.getColumnFamilies().iterator(), ", ") + "}");
+        if (logger.isTraceEnabled())
+            logger.trace("replaying mutation for {}.{}: {}", 
mutation.getKeyspaceName(), ByteBufferUtil.bytesToHex(mutation.key()), "{" + 
StringUtils.join(mutation.getColumnFamilies().iterator(), ", ") + "}");
 
         Runnable runnable = new WrappedRunnable()
         {
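
All of the replay progress messages above ("Replaying ...", "Finished reading ...") now sit at debug or trace. If full verbosity is needed during an incident, the level can be raised for just this package at runtime; a sketch assuming logback-classic as the slf4j backend, which is what Cassandra ships:

import ch.qos.logback.classic.Level;
import ch.qos.logback.classic.Logger;
import org.slf4j.LoggerFactory;

// Illustrative sketch: re-enable the demoted replay messages at runtime.
public final class ReplayTracing
{
    public static void enable()
    {
        // Valid only when logback-classic backs slf4j, so the cast succeeds.
        Logger cl = (Logger) LoggerFactory.getLogger("org.apache.cassandra.db.commitlog");
        cl.setLevel(Level.TRACE);   // affects this logger subtree only
    }
}

Operationally the same effect is normally achieved without code, e.g. via nodetool setlogginglevel; the snippet above only sketches the underlying mechanism.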

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a849efe/src/java/org/apache/cassandra/db/commitlog/CommitLogSegmentManager.java
----------------------------------------------------------------------
diff --git 
a/src/java/org/apache/cassandra/db/commitlog/CommitLogSegmentManager.java 
b/src/java/org/apache/cassandra/db/commitlog/CommitLogSegmentManager.java
index 5918474..f71bb1b 100644
--- a/src/java/org/apache/cassandra/db/commitlog/CommitLogSegmentManager.java
+++ b/src/java/org/apache/cassandra/db/commitlog/CommitLogSegmentManager.java
@@ -117,7 +117,7 @@ public class CommitLogSegmentManager
                             // if we have no more work to do, check if we 
should create a new segment
                             if (availableSegments.isEmpty() && 
(activeSegments.isEmpty() || createReserveSegments))
                             {
-                                logger.debug("No segments in reserve; creating 
a fresh one");
+                                logger.trace("No segments in reserve; creating 
a fresh one");
                                 // TODO : some error handling in case we fail 
to create a new segment
                                 
availableSegments.add(CommitLogSegment.createSegment(commitLog));
                                 hasAvailableSegments.signalAll();
@@ -354,7 +354,7 @@ public class CommitLogSegmentManager
     void recycleSegment(final File file)
     {
         // (don't decrease managed size, since this was never a "live" segment)
-        logger.debug("(Unopened) segment {} is no longer needed and will be 
deleted now", file);
+        logger.trace("(Unopened) segment {} is no longer needed and will be 
deleted now", file);
         FileUtils.deleteWithConfirm(file);
     }
 
@@ -365,7 +365,7 @@ public class CommitLogSegmentManager
      */
     private void discardSegment(final CommitLogSegment segment, final boolean 
deleteFile)
     {
-        logger.debug("Segment {} is no longer active and will be deleted {}", 
segment, deleteFile ? "now" : "by the archive script");
+        logger.trace("Segment {} is no longer active and will be deleted {}", 
segment, deleteFile ? "now" : "by the archive script");
 
         segmentManagementTasks.add(new Runnable()
         {
@@ -397,7 +397,7 @@ public class CommitLogSegmentManager
     {
         long total = DatabaseDescriptor.getTotalCommitlogSpaceInMB() * 1024 * 
1024;
         long currentSize = size.get();
-        logger.debug("Total active commitlog segment space used is {} out of 
{}", currentSize, total);
+        logger.trace("Total active commitlog segment space used is {} out of 
{}", currentSize, total);
         return total - currentSize;
     }
 
@@ -446,7 +446,7 @@ public class CommitLogSegmentManager
                 {
                     // even though we remove the schema entry before a final 
flush when dropping a CF,
                     // it's still possible for a writer to race and finish his 
append after the flush.
-                    logger.debug("Marking clean CF {} that doesn't exist 
anymore", dirtyCFId);
+                    logger.trace("Marking clean CF {} that doesn't exist 
anymore", dirtyCFId);
                     segment.markClean(dirtyCFId, segment.getContext());
                 }
                 else if (!flushes.containsKey(dirtyCFId))
@@ -469,7 +469,7 @@ public class CommitLogSegmentManager
      */
     public void stopUnsafe(boolean deleteSegments)
     {
-        logger.debug("CLSM closing and clearing existing commit log 
segments...");
+        logger.trace("CLSM closing and clearing existing commit log 
segments...");
         createReserveSegments = false;
 
         awaitManagementTasksCompletion();
@@ -498,7 +498,7 @@ public class CommitLogSegmentManager
 
         size.set(0L);
 
-        logger.debug("CLSM done with closing and clearing existing commit log 
segments.");
+        logger.trace("CLSM done with closing and clearing existing commit log 
segments.");
     }
 
     // Used by tests only.
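
These demotions only make sense together with the conf/logback.xml change at the top of this commit, which routes debug-level output to a separate file so system.log stays quiet. As a rough illustration of that appender topology, expressed in code rather than XML, with an invented path and pattern; the authoritative version is the shipped logback.xml:

import ch.qos.logback.classic.Level;
import ch.qos.logback.classic.Logger;
import ch.qos.logback.classic.LoggerContext;
import ch.qos.logback.classic.encoder.PatternLayoutEncoder;
import ch.qos.logback.classic.filter.ThresholdFilter;
import ch.qos.logback.classic.spi.ILoggingEvent;
import ch.qos.logback.core.FileAppender;
import org.slf4j.LoggerFactory;

// Illustrative sketch of a separate debug appender; not the shipped config.
public final class DebugLogSketch
{
    public static void install()
    {
        LoggerContext ctx = (LoggerContext) LoggerFactory.getILoggerFactory();

        PatternLayoutEncoder enc = new PatternLayoutEncoder();
        enc.setContext(ctx);
        enc.setPattern("%-5level [%thread] %date{ISO8601} %F:%L - %msg%n");
        enc.start();

        ThresholdFilter debugAndUp = new ThresholdFilter();
        debugAndUp.setLevel("DEBUG");   // drop TRACE, keep DEBUG and above
        debugAndUp.start();

        FileAppender<ILoggingEvent> debugLog = new FileAppender<>();
        debugLog.setContext(ctx);
        debugLog.setName("DEBUGLOG");
        debugLog.setFile("/var/log/cassandra/debug.log");   // hypothetical path
        debugLog.setEncoder(enc);
        debugLog.addFilter(debugAndUp);
        debugLog.start();

        Logger root = ctx.getLogger(Logger.ROOT_LOGGER_NAME);
        root.setLevel(Level.DEBUG);   // root must emit DEBUG for the file to see it
        root.addAppender(debugLog);
    }
}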

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a849efe/src/java/org/apache/cassandra/db/compaction/CompactionController.java
----------------------------------------------------------------------
diff --git 
a/src/java/org/apache/cassandra/db/compaction/CompactionController.java 
b/src/java/org/apache/cassandra/db/compaction/CompactionController.java
index 81d8b7c..5f0a198 100644
--- a/src/java/org/apache/cassandra/db/compaction/CompactionController.java
+++ b/src/java/org/apache/cassandra/db/compaction/CompactionController.java
@@ -110,7 +110,7 @@ public class CompactionController implements AutoCloseable
      */
     public static Set<SSTableReader> getFullyExpiredSSTables(ColumnFamilyStore 
cfStore, Iterable<SSTableReader> compacting, Iterable<SSTableReader> 
overlapping, int gcBefore)
     {
-        logger.debug("Checking droppable sstables in {}", cfStore);
+        logger.trace("Checking droppable sstables in {}", cfStore);
 
         if (compacting == null)
             return Collections.<SSTableReader>emptySet();
@@ -150,7 +150,7 @@ public class CompactionController implements AutoCloseable
             }
             else
             {
-               logger.debug("Dropping expired SSTable {} 
(maxLocalDeletionTime={}, gcBefore={})",
+               logger.trace("Dropping expired SSTable {} 
(maxLocalDeletionTime={}, gcBefore={})",
                         candidate, 
candidate.getSSTableMetadata().maxLocalDeletionTime, gcBefore);
             }
         }

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a849efe/src/java/org/apache/cassandra/db/compaction/CompactionManager.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/compaction/CompactionManager.java 
b/src/java/org/apache/cassandra/db/compaction/CompactionManager.java
index 7def98d..ea20a1f 100644
--- a/src/java/org/apache/cassandra/db/compaction/CompactionManager.java
+++ b/src/java/org/apache/cassandra/db/compaction/CompactionManager.java
@@ -153,19 +153,19 @@ public class CompactionManager implements 
CompactionManagerMBean
     {
         if (cfs.isAutoCompactionDisabled())
         {
-            logger.debug("Autocompaction is disabled");
+            logger.trace("Autocompaction is disabled");
             return Collections.emptyList();
         }
 
         int count = compactingCF.count(cfs);
         if (count > 0 && executor.getActiveCount() >= 
executor.getMaximumPoolSize())
         {
-            logger.debug("Background compaction is still running for {}.{} ({} 
remaining). Skipping",
+            logger.trace("Background compaction is still running for {}.{} ({} 
remaining). Skipping",
                          cfs.keyspace.getName(), cfs.name, count);
             return Collections.emptyList();
         }
 
-        logger.debug("Scheduling a background task check for {}.{} with {}",
+        logger.trace("Scheduling a background task check for {}.{} with {}",
                      cfs.keyspace.getName(),
                      cfs.name,
                      cfs.getCompactionStrategy().getName());
@@ -211,10 +211,10 @@ public class CompactionManager implements 
CompactionManagerMBean
         {
             try
             {
-                logger.debug("Checking {}.{}", cfs.keyspace.getName(), 
cfs.name);
+                logger.trace("Checking {}.{}", cfs.keyspace.getName(), 
cfs.name);
                 if (!cfs.isValid())
                 {
-                    logger.debug("Aborting compaction for dropped CF");
+                    logger.trace("Aborting compaction for dropped CF");
                     return;
                 }
 
@@ -222,7 +222,7 @@ public class CompactionManager implements 
CompactionManagerMBean
                 AbstractCompactionTask task = 
strategy.getNextBackgroundTask(getDefaultGcBefore(cfs));
                 if (task == null)
                 {
-                    logger.debug("No tasks available");
+                    logger.trace("No tasks available");
                     return;
                 }
                 task.execute(metrics);
@@ -461,7 +461,7 @@ public class CompactionManager implements 
CompactionManagerMBean
                                       long repairedAt) throws 
InterruptedException, IOException
     {
         logger.info("Starting anticompaction for {}.{} on {}/{} sstables", 
cfs.keyspace.getName(), cfs.getColumnFamilyName(), validatedForRepair.size(), 
cfs.getSSTables().size());
-        logger.debug("Starting anticompaction for ranges {}", ranges);
+        logger.trace("Starting anticompaction for ranges {}", ranges);
         Set<SSTableReader> sstables = new HashSet<>(validatedForRepair);
         Set<SSTableReader> mutatedRepairStatuses = new HashSet<>();
         Set<SSTableReader> nonAnticompacting = new HashSet<>();
@@ -780,7 +780,7 @@ public class CompactionManager implements 
CompactionManagerMBean
         }
         if (!needsCleanup(sstable, ranges))
         {
-            logger.debug("Skipping {} for cleanup; all rows should be kept", 
sstable);
+            logger.trace("Skipping {} for cleanup; all rows should be kept", 
sstable);
             return;
         }
 
@@ -790,8 +790,8 @@ public class CompactionManager implements 
CompactionManagerMBean
 
         long expectedBloomFilterSize = 
Math.max(cfs.metadata.getMinIndexInterval(),
                                                
SSTableReader.getApproximateKeyCount(txn.originals()));
-        if (logger.isDebugEnabled())
-            logger.debug("Expected bloom filter size : {}", 
expectedBloomFilterSize);
+        if (logger.isTraceEnabled())
+            logger.trace("Expected bloom filter size : {}", 
expectedBloomFilterSize);
 
         logger.info("Cleaning up {}", sstable);
 
@@ -1110,11 +1110,11 @@ public class CompactionManager implements 
CompactionManagerMBean
                 }
             }
 
-            if (logger.isDebugEnabled())
+            if (logger.isTraceEnabled())
             {
                 // MT serialize may take time
                 long duration = 
TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start);
-                logger.debug("Validation finished in {} msec, depth {} for {} 
keys, serialized size {} bytes for {}",
+                logger.trace("Validation finished in {} msec, depth {} for {} 
keys, serialized size {} bytes for {}",
                              duration,
                              depth,
                              numPartitions,
@@ -1243,7 +1243,7 @@ public class CompactionManager implements 
CompactionManagerMBean
             repairedSSTableWriter.commit();
             unRepairedSSTableWriter.commit();
 
-            logger.debug("Repaired {} keys out of {} for {}/{} in {}", 
repairedKeyCount,
+            logger.trace("Repaired {} keys out of {} for {}/{} in {}", 
repairedKeyCount,
                                                                        
repairedKeyCount + unrepairedKeyCount,
                                                                        
cfs.keyspace.getName(),
                                                                        
cfs.getColumnFamilyName(),
@@ -1295,7 +1295,7 @@ public class CompactionManager implements 
CompactionManagerMBean
             {
                 if (!AutoSavingCache.flushInProgress.add(writer.cacheType()))
                 {
-                    logger.debug("Cache flushing was already in progress: 
skipping {}", writer.getCompactionInfo());
+                    logger.trace("Cache flushing was already in progress: 
skipping {}", writer.getCompactionInfo());
                     return;
                 }
                 try
@@ -1417,7 +1417,7 @@ public class CompactionManager implements 
CompactionManagerMBean
                     if (t.getSuppressed() != null && t.getSuppressed().length 
> 0)
                         logger.warn("Interruption of compaction encountered 
exceptions:", t);
                     else
-                        logger.debug("Full interruption stack trace:", t);
+                        logger.trace("Full interruption stack trace:", t);
                 }
                 else
                 {
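
The validation hunk guards the whole measurement block, not just the trace call, since converting the duration and computing the serialized size is itself work worth skipping when tracing is off. Schematically, with validate() as a hypothetical stand-in:

import java.util.concurrent.TimeUnit;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

// Illustrative sketch of the guarded-measurement pattern above.
public class ValidationTimingExample
{
    private static final Logger logger = LoggerFactory.getLogger(ValidationTimingExample.class);

    public void run()
    {
        long start = System.nanoTime();
        validate();

        // The conversion and logging both sit behind the level check.
        if (logger.isTraceEnabled())
        {
            long duration = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start);
            logger.trace("Validation finished in {} msec", duration);
        }
    }

    private void validate() { /* hypothetical work */ }
}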

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a849efe/src/java/org/apache/cassandra/db/compaction/CompactionTask.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/compaction/CompactionTask.java 
b/src/java/org/apache/cassandra/db/compaction/CompactionTask.java
index ea64fb2..575c326 100644
--- a/src/java/org/apache/cassandra/db/compaction/CompactionTask.java
+++ b/src/java/org/apache/cassandra/db/compaction/CompactionTask.java
@@ -139,7 +139,7 @@ public class CompactionTask extends AbstractCompactionTask
         }
         ssTableLoggerMsg.append("]");
         String taskIdLoggerMsg = taskId == null ? 
UUIDGen.getTimeUUID().toString() : taskId.toString();
-        logger.info("Compacting ({}) {}", taskIdLoggerMsg, ssTableLoggerMsg);
+        logger.debug("Compacting ({}) {}", taskIdLoggerMsg, ssTableLoggerMsg);
 
         long start = System.nanoTime();
 
@@ -221,10 +221,10 @@ public class CompactionTask extends AbstractCompactionTask
             double mbps = dTime > 0 ? (double) endsize / (1024 * 1024) / 
((double) dTime / 1000) : 0;
             long totalSourceRows = 0;
             String mergeSummary = 
updateCompactionHistory(cfs.keyspace.getName(), cfs.getColumnFamilyName(), ci, 
startsize, endsize);
-            logger.info(String.format("Compacted (%s) %d sstables to [%s] to 
level=%d.  %,d bytes to %,d (~%d%% of original) in %,dms = %fMB/s.  %,d total 
partitions merged to %,d.  Partition merge counts were {%s}",
+            logger.debug(String.format("Compacted (%s) %d sstables to [%s] to 
level=%d.  %,d bytes to %,d (~%d%% of original) in %,dms = %fMB/s.  %,d total 
partitions merged to %,d.  Partition merge counts were {%s}",
                                       taskIdLoggerMsg, 
transaction.originals().size(), newSSTableNames.toString(), getLevel(), 
startsize, endsize, (int) (ratio * 100), dTime, mbps, totalSourceRows, 
totalKeysWritten, mergeSummary));
-            logger.debug(String.format("CF Total Bytes Compacted: %,d", 
CompactionTask.addToTotalBytesCompacted(endsize)));
-            logger.debug("Actual #keys: {}, Estimated #keys:{}, Err%: {}", 
totalKeysWritten, estimatedKeys, ((double)(totalKeysWritten - 
estimatedKeys)/totalKeysWritten));
+            logger.trace(String.format("CF Total Bytes Compacted: %,d", 
CompactionTask.addToTotalBytesCompacted(endsize)));
+            logger.trace("Actual #keys: {}, Estimated #keys:{}, Err%: {}", 
totalKeysWritten, estimatedKeys, ((double)(totalKeysWritten - 
estimatedKeys)/totalKeysWritten));
 
             if (offline)
                 Refs.release(Refs.selfRefs(newSStables));
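
CompactionTask keeps String.format inside the logging calls even after the demotion because specifiers like %,d produce locale-grouped digits that slf4j's {} placeholders cannot ({} merely calls toString()). The format call therefore runs eagerly regardless of level, which is why a guard is worthwhile on hot paths; a hedged sketch with invented names:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

// Illustrative sketch: eager String.format behind an explicit level guard.
public class CompactionSummaryExample
{
    private static final Logger logger = LoggerFactory.getLogger(CompactionSummaryExample.class);

    public void logSummary(long startSize, long endSize, long millis)
    {
        if (logger.isDebugEnabled())
            logger.debug(String.format("Compacted %,d bytes to %,d in %,dms",
                                       startSize, endSize, millis));
    }
}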

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a849efe/src/java/org/apache/cassandra/db/compaction/DateTieredCompactionStrategy.java
----------------------------------------------------------------------
diff --git 
a/src/java/org/apache/cassandra/db/compaction/DateTieredCompactionStrategy.java 
b/src/java/org/apache/cassandra/db/compaction/DateTieredCompactionStrategy.java
index 0956962..da2d35d 100644
--- 
a/src/java/org/apache/cassandra/db/compaction/DateTieredCompactionStrategy.java
+++ 
b/src/java/org/apache/cassandra/db/compaction/DateTieredCompactionStrategy.java
@@ -50,10 +50,10 @@ public class DateTieredCompactionStrategy extends 
AbstractCompactionStrategy
         if 
(!options.containsKey(AbstractCompactionStrategy.TOMBSTONE_COMPACTION_INTERVAL_OPTION)
 && !options.containsKey(AbstractCompactionStrategy.TOMBSTONE_THRESHOLD_OPTION))
         {
             disableTombstoneCompactions = true;
-            logger.debug("Disabling tombstone compactions for DTCS");
+            logger.trace("Disabling tombstone compactions for DTCS");
         }
         else
-            logger.debug("Enabling tombstone compactions for DTCS");
+            logger.trace("Enabling tombstone compactions for DTCS");
 
     }
 
@@ -99,7 +99,7 @@ public class DateTieredCompactionStrategy extends 
AbstractCompactionStrategy
         List<SSTableReader> compactionCandidates = new 
ArrayList<>(getNextNonExpiredSSTables(Sets.difference(candidates, expired), 
gcBefore));
         if (!expired.isEmpty())
         {
-            logger.debug("Including expired sstables: {}", expired);
+            logger.trace("Including expired sstables: {}", expired);
             compactionCandidates.addAll(expired);
         }
         return compactionCandidates;
@@ -134,7 +134,7 @@ public class DateTieredCompactionStrategy extends 
AbstractCompactionStrategy
         Iterable<SSTableReader> candidates = 
filterOldSSTables(Lists.newArrayList(candidateSSTables), options.maxSSTableAge, 
now);
 
         List<List<SSTableReader>> buckets = 
getBuckets(createSSTableAndMinTimestampPairs(candidates), options.baseTime, 
base, now);
-        logger.debug("Compaction buckets are {}", buckets);
+        logger.trace("Compaction buckets are {}", buckets);
         updateEstimatedCompactionsByTasks(buckets);
         List<SSTableReader> mostInteresting = newestBucket(buckets,
                                                            
cfs.getMinimumCompactionThreshold(),
@@ -391,7 +391,7 @@ public class DateTieredCompactionStrategy extends 
AbstractCompactionStrategy
         LifecycleTransaction modifier = cfs.getTracker().tryModify(sstables, 
OperationType.COMPACTION);
         if (modifier == null)
         {
-            logger.debug("Unable to mark {} for compaction; probably a 
background compaction got to it first.  You can disable background compactions 
temporarily if this is a problem", sstables);
+            logger.trace("Unable to mark {} for compaction; probably a 
background compaction got to it first.  You can disable background compactions 
temporarily if this is a problem", sstables);
             return null;
         }
 
