[22/50] logging-log4j2 git commit: LOG4J2-1297 latency test javadoc
LOG4J2-1297 latency test javadoc Project: http://git-wip-us.apache.org/repos/asf/logging-log4j2/repo Commit: http://git-wip-us.apache.org/repos/asf/logging-log4j2/commit/3c37ca34 Tree: http://git-wip-us.apache.org/repos/asf/logging-log4j2/tree/3c37ca34 Diff: http://git-wip-us.apache.org/repos/asf/logging-log4j2/diff/3c37ca34 Branch: refs/heads/LOG4J2-1365 Commit: 3c37ca34db7ada0d7bb20de5a3cd1caf4f4eced7 Parents: c5f5cc9 Author: rpopma Authored: Sun Apr 17 22:42:54 2016 +0900 Committer: rpopma Committed: Sun Apr 17 22:42:54 2016 +0900 -- .../core/async/perftest/SimpleLatencyTest.java | 31 ++-- 1 file changed, 29 insertions(+), 2 deletions(-) -- http://git-wip-us.apache.org/repos/asf/logging-log4j2/blob/3c37ca34/log4j-core/src/test/java/org/apache/logging/log4j/core/async/perftest/SimpleLatencyTest.java -- diff --git a/log4j-core/src/test/java/org/apache/logging/log4j/core/async/perftest/SimpleLatencyTest.java b/log4j-core/src/test/java/org/apache/logging/log4j/core/async/perftest/SimpleLatencyTest.java index f7459b6..1a306b1 100644 --- a/log4j-core/src/test/java/org/apache/logging/log4j/core/async/perftest/SimpleLatencyTest.java +++ b/log4j-core/src/test/java/org/apache/logging/log4j/core/async/perftest/SimpleLatencyTest.java @@ -26,7 +26,34 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; /** - * + * Latency test. + * + * See https://groups.google.com/d/msg/mechanical-sympathy/0gaBXxFm4hE/O9QomwHIJAAJ: + * + * Gil Tene's rules of thumb for latency tests: + * + * DO measure max achievable throughput, but DON'T get focused on it as the main or single axis of measurement / + * comparison. + * DO measure response time / latency behaviors across a spectrum of attempted load levels (e.g. at attempted loads + * between 2% to 100%+ of max established throughput).
+ * DO measure the response time / latency spectrum for each tested load (even for max throughput, for which response + * time should linearly grow with test length, or the test is wrong). HdrHistogram is one good way to capture this + * information. + * DO make sure you are measuring response time correctly and labeling it right. If you also measure and report + * service time, label it as such (don't call it "latency"). + * DO compare response time / latency spectrum at given loads. + * DO [repeatedly] sanity check and calibrate the benchmark setup to verify that it produces expected results for + * known forced scenarios. E.g. forced pauses of known size via ^Z or SIGSTOP/SIGCONT should produce expected response + * time percentile levels. Attempting to load at >100% than achieved throughput should result in response time / latency + * measurements that grow with benchmark run length, while service time (if measured) should remain fairly flat well + * past saturation. + * DON'T use or report standard deviation for latency. Ever. Except if you mean it as a joke. + * DON'T use average latency as a way to compare things with one another. [use median or 90%'ile instead, if what + * you want to compare is "common case" latencies]. Consider not reporting avg. at all. + * DON'T compare results of different setups or loads from short runs (< 20-30 minutes). + * DON'T include process warmup behavior (e.g. 1st minute and 1st 50K messages) in compared or reported results. + * + * */ public class SimpleLatencyTest { private static final String LATENCY_MSG = new String(new char[64]); @@ -61,7 +88,7 @@ public class SimpleLatencyTest { List histograms = new ArrayList<>(threadCount); -for (int i = 0 ; i < 30; i++) { +for (int i = 0; i < 30; i++) { final int ITERATIONS = 100 * 1000;// * 30; runLatencyTest(logger, ITERATIONS, interval, idleStrategy, histograms, nanoTimeCost, threadCount);
logging-log4j2 git commit: LOG4J2-1297 latency test javadoc
Repository: logging-log4j2 Updated Branches: refs/heads/master c5f5cc9f2 -> 3c37ca34d LOG4J2-1297 latency test javadoc Project: http://git-wip-us.apache.org/repos/asf/logging-log4j2/repo Commit: http://git-wip-us.apache.org/repos/asf/logging-log4j2/commit/3c37ca34 Tree: http://git-wip-us.apache.org/repos/asf/logging-log4j2/tree/3c37ca34 Diff: http://git-wip-us.apache.org/repos/asf/logging-log4j2/diff/3c37ca34 Branch: refs/heads/master Commit: 3c37ca34db7ada0d7bb20de5a3cd1caf4f4eced7 Parents: c5f5cc9 Author: rpopma Authored: Sun Apr 17 22:42:54 2016 +0900 Committer: rpopma Committed: Sun Apr 17 22:42:54 2016 +0900 -- .../core/async/perftest/SimpleLatencyTest.java | 31 ++-- 1 file changed, 29 insertions(+), 2 deletions(-) -- http://git-wip-us.apache.org/repos/asf/logging-log4j2/blob/3c37ca34/log4j-core/src/test/java/org/apache/logging/log4j/core/async/perftest/SimpleLatencyTest.java -- diff --git a/log4j-core/src/test/java/org/apache/logging/log4j/core/async/perftest/SimpleLatencyTest.java b/log4j-core/src/test/java/org/apache/logging/log4j/core/async/perftest/SimpleLatencyTest.java index f7459b6..1a306b1 100644 --- a/log4j-core/src/test/java/org/apache/logging/log4j/core/async/perftest/SimpleLatencyTest.java +++ b/log4j-core/src/test/java/org/apache/logging/log4j/core/async/perftest/SimpleLatencyTest.java @@ -26,7 +26,34 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; /** - * + * Latency test. + * + * See https://groups.google.com/d/msg/mechanical-sympathy/0gaBXxFm4hE/O9QomwHIJAAJ: + * + * Gil Tene's rules of thumb for latency tests: + * + * DO measure max achievable throughput, but DON'T get focused on it as the main or single axis of measurement / + * comparison. + * DO measure response time / latency behaviors across a spectrum of attempted load levels (e.g. at attempted loads + * between 2% to 100%+ of max established throughput).
+ * DO measure the response time / latency spectrum for each tested load (even for max throughput, for which response + * time should linearly grow with test length, or the test is wrong). HdrHistogram is one good way to capture this + * information. + * DO make sure you are measuring response time correctly and labeling it right. If you also measure and report + * service time, label it as such (don't call it "latency"). + * DO compare response time / latency spectrum at given loads. + * DO [repeatedly] sanity check and calibrate the benchmark setup to verify that it produces expected results for + * known forced scenarios. E.g. forced pauses of known size via ^Z or SIGSTOP/SIGCONT should produce expected response + * time percentile levels. Attempting to load at >100% than achieved throughput should result in response time / latency + * measurements that grow with benchmark run length, while service time (if measured) should remain fairly flat well + * past saturation. + * DON'T use or report standard deviation for latency. Ever. Except if you mean it as a joke. + * DON'T use average latency as a way to compare things with one another. [use median or 90%'ile instead, if what + * you want to compare is "common case" latencies]. Consider not reporting avg. at all. + * DON'T compare results of different setups or loads from short runs (< 20-30 minutes). + * DON'T include process warmup behavior (e.g. 1st minute and 1st 50K messages) in compared or reported results. + * + * */ public class SimpleLatencyTest { private static final String LATENCY_MSG = new String(new char[64]); @@ -61,7 +88,7 @@ public class SimpleLatencyTest { List histograms = new ArrayList<>(threadCount); -for (int i = 0 ; i < 30; i++) { +for (int i = 0; i < 30; i++) { final int ITERATIONS = 100 * 1000;// * 30; runLatencyTest(logger, ITERATIONS, interval, idleStrategy, histograms, nanoTimeCost, threadCount);