Posted to commits@logging.apache.org by rg...@apache.org on 2016/04/26 07:48:14 UTC

[04/38] logging-log4j2 git commit: cleanup

cleanup


Project: http://git-wip-us.apache.org/repos/asf/logging-log4j2/repo
Commit: http://git-wip-us.apache.org/repos/asf/logging-log4j2/commit/d8161086
Tree: http://git-wip-us.apache.org/repos/asf/logging-log4j2/tree/d8161086
Diff: http://git-wip-us.apache.org/repos/asf/logging-log4j2/diff/d8161086

Branch: refs/heads/master
Commit: d8161086890d99226593408172530583a9e00497
Parents: 02cb0fd
Author: rpopma <rp...@apache.org>
Authored: Thu Apr 21 14:34:23 2016 +0900
Committer: Ralph Goers <rg...@nextiva.com>
Committed: Mon Apr 25 21:30:27 2016 -0700

----------------------------------------------------------------------
 .../core/async/perftest/SimpleLatencyTest.java  | 30 +-------------------
 1 file changed, 1 insertion(+), 29 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/logging-log4j2/blob/d8161086/log4j-core/src/test/java/org/apache/logging/log4j/core/async/perftest/SimpleLatencyTest.java
----------------------------------------------------------------------
diff --git a/log4j-core/src/test/java/org/apache/logging/log4j/core/async/perftest/SimpleLatencyTest.java b/log4j-core/src/test/java/org/apache/logging/log4j/core/async/perftest/SimpleLatencyTest.java
index cad4e55..8665d5d 100644
--- a/log4j-core/src/test/java/org/apache/logging/log4j/core/async/perftest/SimpleLatencyTest.java
+++ b/log4j-core/src/test/java/org/apache/logging/log4j/core/async/perftest/SimpleLatencyTest.java
@@ -31,33 +31,6 @@ import org.apache.logging.log4j.core.async.EventRoute;
 
 /**
  * Latency test.
- * <p>
- * See <a href="https://groups.google.com/d/msg/mechanical-sympathy/0gaBXxFm4hE/O9QomwHIJAAJ">https://groups.google.com/d/msg/mechanical-sympathy/0gaBXxFm4hE/O9QomwHIJAAJ</a>:
- * </p>
- * <p>Gil Tene's rules of thumb for latency tests:</p>
- * <ol>
- * <li>DO measure max achievable throughput, but DON'T get focused on it as the main or single axis of measurement /
- * comparison.</li>
- * <li>DO measure response time / latency behaviors across a spectrum of attempted load levels (e.g. at attempted loads
- * between 2% to 100%+ of max established throughput).</li>
- * <li>DO measure the response time / latency spectrum for each tested load (even for max throughput, for which response
- * time should linearly grow with test length, or the test is wrong). HdrHistogram is one good way to capture this
- * information.</li>
- * <li>DO make sure you are measuring response time correctly and labeling it right. If you also measure and report
- * service time, label it as such (don't call it "latency").
- * <li>DO compare response time / latency spectrum at given loads.</li>
- * <li>DO [repeatedly] sanity check and calibrate the benchmark setup to verify that it produces expected results for
- * known forced scenarios. E.g. forced pauses of known size via ^Z or SIGSTOP/SIGCONT should produce expected response
- * time percentile levels. Attempting to load at >100% than achieved throughput should result in response time / latency
- * measurements that grow with benchmark run length, while service time (if measured) should remain fairly flat well
- * past saturation.</li>
- * <li>DON'T use or report standard deviation for latency. Ever. Except if you mean it as a joke.</li>
- * <li>DON'T use average latency as a way to compare things with one another. [use median or 90%'ile instead, if what
- * you want to compare is "common case" latencies]. Consider not reporting avg. at all.</li>
- * <li>DON'T compare results of different setups or loads from short runs (< 20-30 minutes).</li>
- * <li>DON'T include process warmup behavior (e.g. 1st minute and 1st 50K messages) in compared or reported results.
- * </li>
- * </ol>
  */
 // -DAsyncLogger.WaitStrategy=busywait
 //-XX:+UnlockDiagnosticVMOptions -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -XX:+PrintTenuringDistribution -XX:+PrintGCApplicationConcurrentTime -XX:+PrintGCApplicationStoppedTime
@@ -69,7 +42,6 @@ public class SimpleLatencyTest {
             System.out.println("Please specify thread count and interval (us)");
             return;
         }
-//        double targetLoadLevel = Double.parseDouble(args[0]);
         final int threadCount = Integer.parseInt(args[0]);
         final int intervalMicros = Integer.parseInt(args[1]);
 
@@ -101,7 +73,7 @@ public class SimpleLatencyTest {
         List<Histogram> histograms = new ArrayList<>(threadCount);
 
         final long TEST_DURATION_MILLIS = TimeUnit.MINUTES.toMillis(4);
-        final int COUNT = 5000000 / threadCount;
+        final int COUNT = (5000 * 1000) / threadCount;
         runLatencyTest(logger, TEST_DURATION_MILLIS, COUNT, interval, idleStrategy, histograms, nanoTimeCost, threadCount);
         long end = System.currentTimeMillis();
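
The Javadoc removed above recommends capturing the full response-time spectrum with HdrHistogram rather than reporting averages or standard deviations, and the surviving code still passes a List<Histogram> into runLatencyTest. The following is a minimal sketch of that style of bookkeeping, not the actual SimpleLatencyTest logic; the class and method names (LatencyHistogramSketch, newLatencyHistogram, record, report) are illustrative, and only the HdrHistogram calls (recordValue, add, outputPercentileDistribution, getValueAtPercentile) are real library API.

import java.io.PrintStream;
import java.util.List;
import java.util.concurrent.TimeUnit;

import org.HdrHistogram.Histogram;

public class LatencyHistogramSketch {

    /** One histogram per worker thread: values up to 10 seconds, 3 significant digits. */
    static Histogram newLatencyHistogram() {
        return new Histogram(TimeUnit.SECONDS.toNanos(10), 3);
    }

    /** Records a single measured latency, in nanoseconds, into the given histogram. */
    static void record(Histogram histogram, long startNanos, long endNanos) {
        histogram.recordValue(endNanos - startNanos);
    }

    /** Merges the per-thread histograms and prints the percentile distribution in microseconds. */
    static void report(List<Histogram> perThreadHistograms, PrintStream out) {
        Histogram merged = newLatencyHistogram();
        for (Histogram histogram : perThreadHistograms) {
            merged.add(histogram);
        }
        merged.outputPercentileDistribution(out, 1000.0); // scale nanoseconds to microseconds
        out.printf("99.99%% of latencies <= %d us%n", merged.getValueAtPercentile(99.99) / 1000);
    }
}

Each worker would record one value per logged message and contribute its histogram once the run finishes, so the merged output shows the percentile spectrum the removed rules ask for instead of a single average.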
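
The parameters still visible in the last hunk (interval, idleStrategy, nanoTimeCost) and the rewritten constant (5000 * 1000) / threadCount, suggesting roughly five million log calls divided evenly across the worker threads, point at a paced load generator. Below is a hedged sketch of such pacing, measured from each operation's intended start time so that backlog delay stays in the measurement (the response-time versus service-time distinction the removed rules insist on); PacedLoadSketch, runPaced and latencySink are illustrative names, not the project's API.

import java.util.concurrent.locks.LockSupport;
import java.util.function.LongConsumer;

public class PacedLoadSketch {

    /**
     * Runs count operations paced intervalNanos apart and reports each operation's
     * latency in nanoseconds, measured from its intended start time, to latencySink.
     */
    static void runPaced(int count, long intervalNanos, Runnable operation, LongConsumer latencySink) {
        long intendedStart = System.nanoTime();
        for (int i = 0; i < count; i++) {
            intendedStart += intervalNanos;
            // Wait until the intended start time; a real test might plug in an idle strategy here.
            long now;
            while ((now = System.nanoTime()) < intendedStart) {
                LockSupport.parkNanos(intendedStart - now);
            }
            operation.run();
            // Measuring from intendedStart (not from 'now') keeps any queueing delay in the number.
            latencySink.accept(System.nanoTime() - intendedStart);
        }
    }
}

A worker could, for example, call runPaced(COUNT, TimeUnit.MICROSECONDS.toNanos(intervalMicros), () -> logger.info("msg"), histogram::recordValue) and feed the resulting histogram into the reporting step sketched above.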