[
https://issues.apache.org/jira/browse/CASSANDRA-16120?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=17193385#comment-17193385
]
David Capwell commented on CASSANDRA-16120:
-------------------------------------------
Also had to update test/conf/logback-dtest.xml
{code}
diff --git a/test/conf/logback-dtest.xml b/test/conf/logback-dtest.xml
index 370e1e5bb2..52eaf335de 100644
--- a/test/conf/logback-dtest.xml
+++ b/test/conf/logback-dtest.xml
@@ -18,35 +18,18 @@
-->
<configuration debug="false" scan="true" scanPeriod="60 seconds">
+ <define name="cluster_id"
class="org.apache.cassandra.distributed.impl.ClusterIDDefiner" />
<define name="instance_id"
class="org.apache.cassandra.distributed.impl.InstanceIDDefiner" />
<!-- Shutdown hook ensures that async appender flushes -->
<shutdownHook class="ch.qos.logback.core.hook.DelayingShutdownHook"/>
- <appender name="INSTANCEFILE"
class="ch.qos.logback.core.rolling.RollingFileAppender">
-
- <file>./build/test/logs/${cassandra.testtag}/TEST-${suitename}.log</file>
- <rollingPolicy
class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy">
-
<fileNamePattern>./build/test/logs/${cassandra.testtag}/TEST-${suitename}.log.%i.gz</fileNamePattern>
- <minIndex>1</minIndex>
- <maxIndex>20</maxIndex>
- </rollingPolicy>
-
- <triggeringPolicy
class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">
- <maxFileSize>20MB</maxFileSize>
- </triggeringPolicy>
-
+ <appender name="INSTANCEFILE" class="ch.qos.logback.core.FileAppender">
+
<file>./build/test/logs/${cassandra.testtag}/${suitename}/${cluster_id}/${instance_id}/system.log</file>
<encoder>
<pattern>%-5level [%thread] ${instance_id} %date{ISO8601}
%msg%n</pattern>
</encoder>
- <immediateFlush>false</immediateFlush>
- </appender>
-
- <appender name="INSTANCEASYNCFILE"
class="ch.qos.logback.classic.AsyncAppender">
- <discardingThreshold>0</discardingThreshold>
- <maxFlushTime>0</maxFlushTime>
- <queueSize>1024</queueSize>
- <appender-ref ref="INSTANCEFILE"/>
+ <immediateFlush>true</immediateFlush>
</appender>
<appender name="INSTANCESTDERR" target="System.err"
class="ch.qos.logback.core.ConsoleAppender">
@@ -70,7 +53,7 @@
<logger name="org.apache.hadoop" level="WARN"/>
<root level="DEBUG">
- <appender-ref ref="INSTANCEASYNCFILE" />
+ <appender-ref ref="INSTANCEFILE" /> <!-- use blocking to avoid race
conditions with appending and searching -->
<appender-ref ref="INSTANCESTDERR" />
<appender-ref ref="INSTANCESTDOUT" />
</root>
{code}
> Add ability for jvm-dtest to grep instance logs
> -----------------------------------------------
>
> Key: CASSANDRA-16120
> URL: https://issues.apache.org/jira/browse/CASSANDRA-16120
> Project: Cassandra
> Issue Type: Improvement
> Components: Test/dtest/java
> Reporter: David Capwell
> Assignee: David Capwell
> Priority: Normal
> Labels: pull-request-available
> Fix For: 4.0-beta
>
>
> One of the main gaps between python dtest and jvm dtest is that python dtest
> supports the ability to grep the logs of an instance; we need this capability,
> as some tests require validating that expected log messages were emitted.
> Pydocs for common log methods
> {code}
> | grep_log(self, expr, filename='system.log', from_mark=None)
> | Returns a list of lines matching the given regular expression
> | in the Cassandra log of this node
> |
> | grep_log_for_errors(self, filename='system.log')
> | Returns a list of errors with stack traces
> | in the Cassandra log of this node
> |
> | grep_log_for_errors_from(self, filename='system.log', seek_start=0)
> {code}
> {code}
> | watch_log_for(self, exprs, from_mark=None, timeout=600, process=None,
> verbose=False, filename='system.log')
> | Watch the log until one or more (regular) expressions are found.
> | This method returns when all the expressions have been found or the method
> | times out (a TimeoutError is then raised). On successful completion,
> | a list of pairs (line matched, match object) is returned.
> {code}
> Below is a POC showing a way to do such logic
> {code}
> package org.apache.cassandra.distributed.test;
> import java.io.BufferedReader;
> import java.io.FileInputStream;
> import java.io.IOException;
> import java.io.InputStreamReader;
> import java.io.UncheckedIOException;
> import java.nio.charset.StandardCharsets;
> import java.util.Iterator;
> import java.util.Spliterator;
> import java.util.Spliterators;
> import java.util.regex.Matcher;
> import java.util.regex.Pattern;
> import java.util.stream.Stream;
> import java.util.stream.StreamSupport;
> import com.google.common.io.Closeables;
> import org.junit.Test;
> import org.apache.cassandra.distributed.Cluster;
> import org.apache.cassandra.utils.AbstractIterator;
> public class AllTheLogs extends TestBaseImpl
> {
> @Test
> public void test() throws IOException
> {
> try (final Cluster cluster = init(Cluster.build(1).start()))
> {
> String tag = System.getProperty("cassandra.testtag",
> "cassandra.testtag_IS_UNDEFINED");
> String suite = System.getProperty("suitename",
> "suitename_IS_UNDEFINED");
> String log = String.format("build/test/logs/%s/TEST-%s.log", tag,
> suite);
> grep(log, "Enqueuing flush of tables").forEach(l ->
> System.out.println("I found the thing: " + l));
> }
> }
> private static Stream<String> grep(String file, String regex) throws
> IOException
> {
> return grep(file, Pattern.compile(regex));
> }
> private static Stream<String> grep(String file, Pattern regex) throws
> IOException
> {
> BufferedReader reader = new BufferedReader(new InputStreamReader(new
> FileInputStream(file), StandardCharsets.UTF_8));
> Iterator<String> it = new AbstractIterator<String>()
> {
> protected String computeNext()
> {
> try
> {
> String s;
> while ((s = reader.readLine()) != null)
> {
> Matcher m = regex.matcher(s);
> if (m.find())
> return s;
> }
> reader.close();
> return endOfData();
> }
> catch (IOException e)
> {
> Closeables.closeQuietly(reader);
> throw new UncheckedIOException(e);
> }
> }
> };
> return StreamSupport.stream(Spliterators.spliteratorUnknownSize(it,
> Spliterator.ORDERED), false);
> }
> }
> {code}
> And
> {code}
> @Test
> public void test() throws IOException
> {
> try (final Cluster cluster = init(Cluster.build(1).start()))
> {
> String tag = System.getProperty("cassandra.testtag",
> "cassandra.testtag_IS_UNDEFINED");
> String suite = System.getProperty("suitename",
> "suitename_IS_UNDEFINED");
> //TODO missing way to get node id
> // cluster.get(1);
> String log =
> String.format("build/test/logs/%s/TEST-%s-node%d.log", tag, suite, 1);
> grep(log, "Enqueuing flush of tables").forEach(l ->
> System.out.println("I found the thing: " + l));
> }
> }
> {code}
--
This message was sent by Atlassian Jira
(v8.3.4#803005)
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]