This is an automated email from the ASF dual-hosted git repository.

domgarguilo pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/accumulo-testing.git


The following commit(s) were added to refs/heads/main by this push:
     new 86536bb  Add units to performance test results (#156)
86536bb is described below

commit 86536bb7414427c9f4146188788f1dfabd069892
Author: Dom G <47725857+domgargu...@users.noreply.github.com>
AuthorDate: Fri Sep 24 11:23:03 2021 -0400

    Add units to performance test results (#156)
    
    * Add units to PT output
    * Added constants and corrections
    * Updated results descriptions
---
 .../accumulo/testing/performance/Report.java       | 26 +++----
 .../accumulo/testing/performance/Result.java       |  9 ++-
 .../performance/tests/ConditionalMutationsPT.java  | 38 +++++-----
 .../performance/tests/DurabilityWriteSpeedPT.java  |  5 +-
 .../testing/performance/tests/GroupCommitPT.java   |  8 +--
 .../testing/performance/tests/HerdingPT.java       |  8 ++-
 .../performance/tests/HighSplitCreationPT.java     |  4 +-
 .../performance/tests/RandomCachedLookupsPT.java   | 81 ++++++++++++----------
 .../testing/performance/tests/RollWALPT.java       | 11 ++-
 .../testing/performance/tests/ScanExecutorPT.java  | 26 ++++---
 .../performance/tests/ScanFewFamiliesPT.java       | 21 +++---
 .../performance/tests/SplitBalancingPT.java        |  2 +-
 .../tests/TableDeletionDuringSplitPT.java          | 11 +--
 .../performance/tests/YieldingScanExecutorPT.java  | 34 +++++----
 14 files changed, 159 insertions(+), 125 deletions(-)
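
For context, a minimal usage sketch (not part of the commit) of the updated Report.Builder API, in which result() and info() calls now take a units label ahead of the description; the ids, values, and method shown below are illustrative only:

    import java.util.concurrent.TimeUnit;

    import org.apache.accumulo.testing.performance.Report;

    public class UnitsUsageSketch {
      public static Report buildExampleReport(long entriesWritten, long writeMillis) {
        Report.Builder builder = Report.builder();
        builder.id("example").description("Shows results reported with explicit units.");
        // Rate-style result: amount and elapsed time, plus a units label and description.
        builder.result("write_rate", entriesWritten, writeMillis, "entries/sec",
            "Rate at which data was written");
        // Plain numeric result: the units string (e.g. milliseconds) precedes the description.
        builder.info("write_time", writeMillis, TimeUnit.MILLISECONDS.toString(),
            "Total time spent writing");
        return builder.build();
      }
    }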

diff --git a/src/main/java/org/apache/accumulo/testing/performance/Report.java b/src/main/java/org/apache/accumulo/testing/performance/Report.java
index f80a80b..fb33d4a 100644
--- a/src/main/java/org/apache/accumulo/testing/performance/Report.java
+++ b/src/main/java/org/apache/accumulo/testing/performance/Report.java
@@ -55,35 +55,37 @@ public class Report {
       return this;
     }
 
-    public Builder result(String id, LongSummaryStatistics stats, String description) {
+    public Builder result(String id, LongSummaryStatistics stats, String units,
+        String description) {
       results.add(new Result(id, new Stats(stats.getMin(), stats.getMax(), stats.getSum(),
-          stats.getAverage(), stats.getCount()), description, Purpose.COMPARISON));
+          stats.getAverage(), stats.getCount()), units, description, Purpose.COMPARISON));
       return this;
     }
 
-    public Builder result(String id, Number data, String description) {
-      results.add(new Result(id, data, description, Purpose.COMPARISON));
+    public Builder result(String id, Number data, String units, String description) {
+      results.add(new Result(id, data, units, description, Purpose.COMPARISON));
       return this;
     }
 
-    public Builder result(String id, long amount, long time, String description) {
-      results.add(new Result(id, amount / (time / 1000.0), description, Purpose.COMPARISON));
+    public Builder result(String id, long amount, long time, String units, String description) {
+      results.add(new Result(id, amount / (time / 1000.0), units, description, Purpose.COMPARISON));
       return this;
     }
 
-    public Builder info(String id, LongSummaryStatistics stats, String description) {
+    public Builder info(String id, LongSummaryStatistics stats, String units, String description) {
       results.add(new Result(id, new Stats(stats.getMin(), stats.getMax(), stats.getSum(),
-          stats.getAverage(), stats.getCount()), description, Purpose.INFORMATIONAL));
+          stats.getAverage(), stats.getCount()), units, description, Purpose.INFORMATIONAL));
       return this;
     }
 
-    public Builder info(String id, long amount, long time, String description) {
-      results.add(new Result(id, amount / (time / 1000.0), description, Purpose.INFORMATIONAL));
+    public Builder info(String id, long amount, long time, String units, String description) {
+      results
+          .add(new Result(id, amount / (time / 1000.0), units, description, Purpose.INFORMATIONAL));
       return this;
     }
 
-    public Builder info(String id, Number data, String description) {
-      results.add(new Result(id, data, description, Purpose.INFORMATIONAL));
+    public Builder info(String id, Number data, String units, String description) {
+      results.add(new Result(id, data, units, description, Purpose.INFORMATIONAL));
       return this;
     }
 
diff --git a/src/main/java/org/apache/accumulo/testing/performance/Result.java b/src/main/java/org/apache/accumulo/testing/performance/Result.java
index 4e67460..89c114f 100644
--- a/src/main/java/org/apache/accumulo/testing/performance/Result.java
+++ b/src/main/java/org/apache/accumulo/testing/performance/Result.java
@@ -22,6 +22,7 @@ public class Result {
   public final String id;
   public final Number data;
   public final Stats stats;
+  public final String units;
   public final String description;
   public final Purpose purpose;
 
@@ -36,18 +37,20 @@ public class Result {
     COMPARISON
   }
 
-  public Result(String id, Number data, String description, Purpose purpose) {
+  public Result(String id, Number data, String units, String description, Purpose purpose) {
     this.id = id;
     this.data = data;
     this.stats = null;
+    this.units = units;
     this.description = description;
     this.purpose = purpose;
   }
 
-  public Result(String id, Stats stats, String description, Purpose purpose) {
+  public Result(String id, Stats stats, String units, String description, Purpose purpose) {
     this.id = id;
-    this.stats = stats;
     this.data = null;
+    this.stats = stats;
+    this.units = units;
     this.description = description;
     this.purpose = purpose;
   }
diff --git a/src/main/java/org/apache/accumulo/testing/performance/tests/ConditionalMutationsPT.java b/src/main/java/org/apache/accumulo/testing/performance/tests/ConditionalMutationsPT.java
index 59a91e7..39ca17f 100644
--- a/src/main/java/org/apache/accumulo/testing/performance/tests/ConditionalMutationsPT.java
+++ b/src/main/java/org/apache/accumulo/testing/performance/tests/ConditionalMutationsPT.java
@@ -55,6 +55,8 @@ import com.google.common.hash.Hashing;
 
 public class ConditionalMutationsPT implements PerformanceTest {
 
+  private static final String conditionsPerSec = "conditions/sec";
+
   @Override
   public SystemConfiguration getSystemConfig() {
     Map<String,String> siteCfg = new HashMap<>();
@@ -102,8 +104,8 @@ public class ConditionalMutationsPT implements PerformanceTest {
     }
 
     reportBuilder.result("avgRate: 1-20",
-        Double.parseDouble(new DecimalFormat("#0.00").format(rateSum / 20)),
-        "ConditionalMutationsTest: average rate (conditions/sec) to run 
sequence 1-20");
+        Double.parseDouble(new DecimalFormat("#0.00").format(rateSum / 20)), 
conditionsPerSec,
+        "ConditionalMutationsTest: average rate to run sequence 1-20");
 
     env.getClient().tableOperations().flush(tableName, null, null, true);
 
@@ -113,8 +115,8 @@ public class ConditionalMutationsPT implements PerformanceTest {
     }
 
     reportBuilder.result("avgRate: 21-40",
-        Double.parseDouble(new DecimalFormat("#0.00").format(rateSum / 20)),
-        "ConditionalMutationsTest: average rate (conditions/sec) to run 
sequence 21-40");
+        Double.parseDouble(new DecimalFormat("#0.00").format(rateSum / 20)), 
conditionsPerSec,
+        "ConditionalMutationsTest: average rate to run sequence 21-40");
   }
 
   public static double conditionalMutationsTime(ConditionalWriter cw, long seq) throws Exception {
@@ -178,8 +180,8 @@ public class ConditionalMutationsPT implements PerformanceTest {
     }
 
     reportBuilder.result("avgRate: 1-20",
-        Double.parseDouble(new DecimalFormat("#0.00").format(rateSum / 20)),
-        "RandomizeConditionalMutationsTest: average rate (conditions/sec) to 
run sequence 1-20");
+        Double.parseDouble(new DecimalFormat("#0.00").format(rateSum / 20)), 
conditionsPerSec,
+        "RandomizeConditionalMutationsTest: average rate to run sequence 
1-20");
 
     env.getClient().tableOperations().flush(tableName, null, null, true);
 
@@ -189,8 +191,8 @@ public class ConditionalMutationsPT implements PerformanceTest {
     }
 
     reportBuilder.result("avgRate: 21-40",
-        Double.parseDouble(new DecimalFormat("#0.00").format(rateSum / 20)),
-        "RandomizeConditionalMutationsTest: average rate (conditions/sec) to 
run sequence 21-40");
+        Double.parseDouble(new DecimalFormat("#0.00").format(rateSum / 20)), 
conditionsPerSec,
+        "RandomizeConditionalMutationsTest: average rate to run sequence 
21-40");
   }
 
  private static double randomizeConditionalMutationsTime(ConditionalWriter cw, long seq)
@@ -266,8 +268,8 @@ public class ConditionalMutationsPT implements PerformanceTest {
     }
 
     reportBuilder.result("avgRate: 1-20",
-        Double.parseDouble(new DecimalFormat("#0.00").format(rateSum / 20)),
-        "RandomizeBatchScanAndWriteTest: average rate (conditions/sec) to 
write and scan sequence 1-20");
+        Double.parseDouble(new DecimalFormat("#0.00").format(rateSum / 20)), 
conditionsPerSec,
+        "RandomizeBatchScanAndWriteTest: average rate to write and scan 
sequence 1-20");
 
     env.getClient().tableOperations().flush(tableName, null, null, true);
 
@@ -277,8 +279,8 @@ public class ConditionalMutationsPT implements PerformanceTest {
     }
 
     reportBuilder.result("avgRate: 21-40",
-        Double.parseDouble(new DecimalFormat("#0.00").format(rateSum / 20)),
-        "RandomizeBatchScanAndWriteTest: average rate (conditions/sec) to 
write and scan sequence 21-40 post flush");
+        Double.parseDouble(new DecimalFormat("#0.00").format(rateSum / 20)), 
conditionsPerSec,
+        "RandomizeBatchScanAndWriteTest: average rate to write and scan 
sequence 21-40 post flush");
   }
 
  private static double randomizeBatchWriteAndScanTime(BatchWriter bw, BatchScanner bs, long seq)
@@ -349,8 +351,8 @@ public class ConditionalMutationsPT implements PerformanceTest {
     }
 
     reportBuilder.result("avgRate1",
-        Double.parseDouble(new DecimalFormat("#0.00").format(rateSum / numTest)),
-        "SetBlockSizeTest: average rate in conditions/sec");
+        Double.parseDouble(new DecimalFormat("#0.00").format(rateSum / numTest)), conditionsPerSec,
+        "SetBlockSizeTest: average rate");
 
     env.getClient().tableOperations().flush(tableName, null, null, true);
 
@@ -360,8 +362,8 @@ public class ConditionalMutationsPT implements PerformanceTest {
     }
 
     reportBuilder.result("avgRate2",
-        Double.parseDouble(new DecimalFormat("#0.00").format(rateSum / numTest)),
-        "SetBlockSizeTest: average rate in conditions/sec post flush");
+        Double.parseDouble(new DecimalFormat("#0.00").format(rateSum / numTest)), conditionsPerSec,
+        "SetBlockSizeTest: average rate post flush");
 
     env.getClient().tableOperations().compact(tableName, null, null, true, true);
 
@@ -371,8 +373,8 @@ public class ConditionalMutationsPT implements PerformanceTest {
     }
 
     reportBuilder.result("avgRate3",
-        Double.parseDouble(new DecimalFormat("#0.00").format(rateSum / 20)),
-        "SetBlockSizeTest: average rate in conditions/sec post compaction");
+        Double.parseDouble(new DecimalFormat("#0.00").format(rateSum / 20)), conditionsPerSec,
+        "SetBlockSizeTest: average rate post compaction");
     reportBuilder.parameter("numRows", numRows, "SetBlockSizeTest: The number 
of rows");
     reportBuilder.parameter("numCols", numCols, "SetBlockSizeTest: The number 
of columns");
     reportBuilder.parameter("numTest", numTest,
diff --git a/src/main/java/org/apache/accumulo/testing/performance/tests/DurabilityWriteSpeedPT.java b/src/main/java/org/apache/accumulo/testing/performance/tests/DurabilityWriteSpeedPT.java
index 4946db8..55a1bf5 100644
--- a/src/main/java/org/apache/accumulo/testing/performance/tests/DurabilityWriteSpeedPT.java
+++ b/src/main/java/org/apache/accumulo/testing/performance/tests/DurabilityWriteSpeedPT.java
@@ -19,6 +19,7 @@ package org.apache.accumulo.testing.performance.tests;
 
 import java.util.Arrays;
 import java.util.Collections;
+import java.util.concurrent.TimeUnit;
 
 import org.apache.accumulo.core.client.AccumuloClient;
 import org.apache.accumulo.core.client.BatchWriter;
@@ -56,7 +57,7 @@ public class DurabilityWriteSpeedPT implements PerformanceTest {
         createTable(client, tableName, durability);
         long median = writeSome(reportBuilder, client, tableName, N, durability);
         tableOps.delete(tableName);
-        reportBuilder.result(durability + " Median", median,
+        reportBuilder.result(durability + " Median", median, TimeUnit.MILLISECONDS.toString(),
             "Median time result for " + durability);
       }
     }
@@ -82,7 +83,7 @@ public class DurabilityWriteSpeedPT implements PerformanceTest {
       }
       attempts[attempt] = System.currentTimeMillis() - now;
       reportBuilder.info(durabilityLevel + " attempt " + attempt, System.currentTimeMillis() - now,
-          "Times for each attempt in ms");
+          TimeUnit.MILLISECONDS.toString(), "Times for each attempt");
     }
     Arrays.sort(attempts);
     // Return the median duration
diff --git a/src/main/java/org/apache/accumulo/testing/performance/tests/GroupCommitPT.java b/src/main/java/org/apache/accumulo/testing/performance/tests/GroupCommitPT.java
index bea5e0e..1d9c130 100644
--- a/src/main/java/org/apache/accumulo/testing/performance/tests/GroupCommitPT.java
+++ b/src/main/java/org/apache/accumulo/testing/performance/tests/GroupCommitPT.java
@@ -222,11 +222,11 @@ public class GroupCommitPT implements PerformanceTest {
 
     env.getClient().tableOperations().delete(tableName);
     if (warmup) {
-      report.info("warmup_rate_" + numThreads, NUM_MUTATIONS, t2 - t1, "The 
warmup rate at which "
-          + numThreads + " threads wrote data. The rate is mutations per 
second.");
+      report.info("warmup_rate_" + numThreads, NUM_MUTATIONS, t2 - t1, 
"mutations/sec",
+          "The warmup rate at which " + numThreads + " threads wrote data.");
     } else {
-      report.result("rate_" + numThreads, NUM_MUTATIONS, t2 - t1, "The rate at 
which " + numThreads
-          + " threads wrote data. The rate is mutations per second.");
+      report.result("rate_" + numThreads, NUM_MUTATIONS, t2 - t1, 
"mutations/sec",
+          "The rate at which " + numThreads + " threads wrote data.");
     }
   }
 
diff --git a/src/main/java/org/apache/accumulo/testing/performance/tests/HerdingPT.java b/src/main/java/org/apache/accumulo/testing/performance/tests/HerdingPT.java
index 9f644fd..a1c4340 100644
--- a/src/main/java/org/apache/accumulo/testing/performance/tests/HerdingPT.java
+++ b/src/main/java/org/apache/accumulo/testing/performance/tests/HerdingPT.java
@@ -27,6 +27,7 @@ import java.util.concurrent.ExecutionException;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
 
 import org.apache.accumulo.core.client.AccumuloClient;
 import org.apache.accumulo.core.client.AccumuloException;
@@ -86,7 +87,8 @@ public class HerdingPT implements PerformanceTest {
     final AccumuloClient client = env.getClient();
     initTable(client);
     long herdTime = getHerdingDuration(client);
-    reportBuilder.result("herd_time", herdTime, "The time (in ms) it took 
herding to complete.");
+    reportBuilder.result("herd_time", herdTime, 
TimeUnit.MILLISECONDS.toString(),
+        "The time it took herding to complete.");
 
     return reportBuilder.build();
   }
@@ -111,10 +113,10 @@ public class HerdingPT implements PerformanceTest {
     byte[] row = toZeroPaddedString(rowNum, 8);
     Mutation mutation = new Mutation(row);
     for (int col = 0; col < NUM_COLS; col++) {
-      byte[] qualifer = toZeroPaddedString(col, 4);
+      byte[] qualifier = toZeroPaddedString(col, 4);
       byte[] value = new byte[32];
       random.nextBytes(value);
-      mutation.put(COL_FAM, qualifer, value);
+      mutation.put(COL_FAM, qualifier, value);
     }
     return mutation;
   }
diff --git a/src/main/java/org/apache/accumulo/testing/performance/tests/HighSplitCreationPT.java b/src/main/java/org/apache/accumulo/testing/performance/tests/HighSplitCreationPT.java
index 186fbbf..b224a01 100644
--- a/src/main/java/org/apache/accumulo/testing/performance/tests/HighSplitCreationPT.java
+++ b/src/main/java/org/apache/accumulo/testing/performance/tests/HighSplitCreationPT.java
@@ -62,8 +62,8 @@ public class HighSplitCreationPT implements PerformanceTest {
     long totalTime = System.currentTimeMillis() - start;
     double splitsPerSecond = NUM_SPLITS / (totalTime / ONE_SECOND);
 
-    reportBuilder.result("splits_per_second", splitsPerSecond,
-        "The average number of splits created per second.");
+    reportBuilder.result("splits_per_second", splitsPerSecond, "splits/sec",
+        "The average rate of split creation.");
 
     return reportBuilder.build();
   }
diff --git a/src/main/java/org/apache/accumulo/testing/performance/tests/RandomCachedLookupsPT.java b/src/main/java/org/apache/accumulo/testing/performance/tests/RandomCachedLookupsPT.java
index c09fe2e..b841fec 100644
--- a/src/main/java/org/apache/accumulo/testing/performance/tests/RandomCachedLookupsPT.java
+++ b/src/main/java/org/apache/accumulo/testing/performance/tests/RandomCachedLookupsPT.java
@@ -29,6 +29,7 @@ import java.util.TreeSet;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
 import java.util.stream.Stream;
 
 import org.apache.accumulo.core.client.AccumuloClient;
@@ -57,6 +58,8 @@ public class RandomCachedLookupsPT implements PerformanceTest {
   private static final int NUM_LOOKUPS_PER_THREAD = 25000;
   private static final int NUM_ROWS = 100000;
 
+  private static final String ms = TimeUnit.MILLISECONDS.toString();
+
   @Override
   public SystemConfiguration getSystemConfig() {
     Map<String,String> siteCfg = new HashMap<>();
@@ -89,40 +92,42 @@ public class RandomCachedLookupsPT implements PerformanceTest {
     long d64 = doLookups(env.getClient(), 64, NUM_LOOKUPS_PER_THREAD);
     long d128 = doLookups(env.getClient(), 128, NUM_LOOKUPS_PER_THREAD);
 
+    final String lookupPerSec = "lookups/sec";
+
     reportBuilder.id("smalls");
     reportBuilder.description(
         "Runs multiple threads each doing lots of small random scans.  For 
this test data and index cache are enabled.");
-    reportBuilder.info("warmup", 32 * NUM_LOOKUPS_PER_THREAD, warmup,
-        "Random lookup per sec for 32 threads");
-    reportBuilder.info("lookups_1", NUM_LOOKUPS_PER_THREAD, d1,
-        "Random lookup per sec rate for 1 thread");
-    reportBuilder.info("lookups_4", 4 * NUM_LOOKUPS_PER_THREAD, d4,
-        "Random lookup per sec rate for 4 threads");
-    reportBuilder.info("lookups_8", 8 * NUM_LOOKUPS_PER_THREAD, d8,
-        "Random lookup per sec rate for 8 threads");
-    reportBuilder.info("lookups_16", 16 * NUM_LOOKUPS_PER_THREAD, d16,
-        "Random lookup per sec rate for 16 threads");
-    reportBuilder.info("lookups_32", 32 * NUM_LOOKUPS_PER_THREAD, d32,
-        "Random lookup per sec rate for 32 threads");
-    reportBuilder.info("lookups_64", 64 * NUM_LOOKUPS_PER_THREAD, d64,
-        "Random lookup per sec rate for 64 threads");
-    reportBuilder.info("lookups_128", 128 * NUM_LOOKUPS_PER_THREAD, d128,
-        "Random lookup per sec rate for 128 threads");
-
-    reportBuilder.result("avg_1", d1 / (double) NUM_LOOKUPS_PER_THREAD,
-        "Average milliseconds per lookup for 1 thread");
-    reportBuilder.result("avg_4", d4 / (double) NUM_LOOKUPS_PER_THREAD,
-        "Average milliseconds per lookup for 4 threads");
-    reportBuilder.result("avg_8", d8 / (double) NUM_LOOKUPS_PER_THREAD,
-        "Average milliseconds per lookup for 8 threads");
-    reportBuilder.result("avg_16", d16 / (double) NUM_LOOKUPS_PER_THREAD,
-        "Average milliseconds per lookup for 16 threads");
-    reportBuilder.result("avg_32", d32 / (double) NUM_LOOKUPS_PER_THREAD,
-        "Average milliseconds per lookup for 32 threads");
-    reportBuilder.result("avg_64", d64 / (double) NUM_LOOKUPS_PER_THREAD,
-        "Average milliseconds per lookup for 64 threads");
-    reportBuilder.result("avg_128", d128 / (double) NUM_LOOKUPS_PER_THREAD,
-        "Average milliseconds per lookup for 128 threads");
+    reportBuilder.info("warmup", 32 * NUM_LOOKUPS_PER_THREAD, warmup, 
lookupPerSec,
+        "Random lookup rate for 32 threads");
+    reportBuilder.info("lookups_1", NUM_LOOKUPS_PER_THREAD, d1, lookupPerSec,
+        "Random lookup rate for 1 thread");
+    reportBuilder.info("lookups_4", 4 * NUM_LOOKUPS_PER_THREAD, d4, 
lookupPerSec,
+        "Random lookup rate for 4 threads");
+    reportBuilder.info("lookups_8", 8 * NUM_LOOKUPS_PER_THREAD, d8, 
lookupPerSec,
+        "Random lookup rate for 8 threads");
+    reportBuilder.info("lookups_16", 16 * NUM_LOOKUPS_PER_THREAD, d16, 
lookupPerSec,
+        "Random lookup rate for 16 threads");
+    reportBuilder.info("lookups_32", 32 * NUM_LOOKUPS_PER_THREAD, d32, 
lookupPerSec,
+        "Random lookup rate for 32 threads");
+    reportBuilder.info("lookups_64", 64 * NUM_LOOKUPS_PER_THREAD, d64, 
lookupPerSec,
+        "Random lookup rate for 64 threads");
+    reportBuilder.info("lookups_128", 128 * NUM_LOOKUPS_PER_THREAD, d128, 
lookupPerSec,
+        "Random lookup rate for 128 threads");
+
+    reportBuilder.result("avg_1", d1 / (double) NUM_LOOKUPS_PER_THREAD, ms,
+        "Average duration per lookup for 1 thread");
+    reportBuilder.result("avg_4", d4 / (double) NUM_LOOKUPS_PER_THREAD, ms,
+        "Average duration per lookup for 4 threads");
+    reportBuilder.result("avg_8", d8 / (double) NUM_LOOKUPS_PER_THREAD, ms,
+        "Average duration per lookup for 8 threads");
+    reportBuilder.result("avg_16", d16 / (double) NUM_LOOKUPS_PER_THREAD, ms,
+        "Average duration per lookup for 16 threads");
+    reportBuilder.result("avg_32", d32 / (double) NUM_LOOKUPS_PER_THREAD, ms,
+        "Average duration per lookup for 32 threads");
+    reportBuilder.result("avg_64", d64 / (double) NUM_LOOKUPS_PER_THREAD, ms,
+        "Average duration per lookup for 64 threads");
+    reportBuilder.result("avg_128", d128 / (double) NUM_LOOKUPS_PER_THREAD, ms,
+        "Average duration per lookup for 128 threads");
 
     return reportBuilder.build();
   }
@@ -196,12 +201,14 @@ public class RandomCachedLookupsPT implements PerformanceTest {
 
     long t6 = System.currentTimeMillis();
 
-    reportBuilder.info("create", t2 - t1, "Time to create table in ms");
-    reportBuilder.info("split", t3 - t2, "Time to split table in ms");
-    reportBuilder.info("write", 4 * numRows, t4 - t3, "Rate to write data in 
entries/sec");
-    reportBuilder.info("compact", 4 * numRows, t5 - t4, "Rate to compact table 
in entries/sec");
-    reportBuilder.info("fullScan", 4 * numRows, t6 - t5,
-        "Rate to do full table scan in entries/sec");
+    reportBuilder.info("create", t2 - t1, ms, "Time to create table");
+    reportBuilder.info("split", t3 - t2, ms, "Time to split table");
+    reportBuilder.info("write", 4 * numRows, t4 - t3, "entries/sec",
+        "Rate at which data are written");
+    reportBuilder.info("compact", 4 * numRows, t5 - t4, "entries/sec",
+        "Rate at which tables are compacted");
+    reportBuilder.info("fullScan", 4 * numRows, t6 - t5, "entries/sec",
+        "Rate at which full table scans take place");
   }
 
  private static long doLookups(AccumuloClient client, int numThreads, int numScansPerThread)
diff --git a/src/main/java/org/apache/accumulo/testing/performance/tests/RollWALPT.java b/src/main/java/org/apache/accumulo/testing/performance/tests/RollWALPT.java
index 327efc0..a91a249 100644
--- a/src/main/java/org/apache/accumulo/testing/performance/tests/RollWALPT.java
+++ b/src/main/java/org/apache/accumulo/testing/performance/tests/RollWALPT.java
@@ -23,6 +23,7 @@ import java.util.Map;
 import java.util.SortedSet;
 import java.util.TreeSet;
 import java.util.UUID;
+import java.util.concurrent.TimeUnit;
 
 import org.apache.accumulo.core.client.AccumuloClient;
 import org.apache.accumulo.core.client.AccumuloException;
@@ -85,14 +86,12 @@ public class RollWALPT implements PerformanceTest {
 
     AccumuloClient client = env.getClient();
     final long smallWALTime = evalSmallWAL(client);
-    reportBuilder.result("small_wal_write_time", smallWALTime,
-        "The time (in ns) it took to write entries to the table with a small 
WAL of "
-            + SIZE_SMALL_WAL);
+    reportBuilder.result("small_wal_write_time", smallWALTime, 
TimeUnit.NANOSECONDS.toString(),
+        "The time taken to write entries to the table with a small WAL of " + 
SIZE_SMALL_WAL);
 
     final long largeWALTime = evalLargeWAL(client);
-    reportBuilder.result("large_wal_write_time", largeWALTime,
-        "The time (in ns) it took to write entries to the table with a large 
WAL of "
-            + SIZE_LARGE_WAL);
+    reportBuilder.result("large_wal_write_time", largeWALTime, 
TimeUnit.NANOSECONDS.toString(),
+        "The time taken to write entries to the table with a large WAL of " + 
SIZE_LARGE_WAL);
     return reportBuilder.build();
   }
 
diff --git a/src/main/java/org/apache/accumulo/testing/performance/tests/ScanExecutorPT.java b/src/main/java/org/apache/accumulo/testing/performance/tests/ScanExecutorPT.java
index e59d480..687308d 100644
--- a/src/main/java/org/apache/accumulo/testing/performance/tests/ScanExecutorPT.java
+++ b/src/main/java/org/apache/accumulo/testing/performance/tests/ScanExecutorPT.java
@@ -24,6 +24,7 @@ import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Random;
 import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
 
 import org.apache.accumulo.core.client.AccumuloClient;
@@ -117,21 +118,26 @@ public class ScanExecutorPT implements PerformanceTest {
 
     Report.Builder builder = Report.builder();
 
+    final String ms = TimeUnit.MILLISECONDS.toString();
+
     builder.id("sexec").description(TEST_DESC);
-    builder.info("write", NUM_ROWS * NUM_FAMS * NUM_QUALS, t2 - t1, "Data 
write rate entries/sec ");
-    builder.info("compact", NUM_ROWS * NUM_FAMS * NUM_QUALS, t3 - t2, "Compact 
rate entries/sec ");
-    builder.info("short_times1", shortStats1, "Times in ms for each short 
scan.  First run.");
-    builder.info("short_times2", shortStats2, "Times in ms for each short 
scan. Second run.");
-    builder.result("short", shortStats2.getAverage(),
-        "Average times in ms for short scans from 2nd run.");
-    builder.info("long_counts", longStats, "Entries read by each long scan 
threads");
-    builder.info("long", longStats.getSum(), (t4 - t3),
-        "Combined rate in entries/second of all long scans");
+    builder.info("write", NUM_ROWS * NUM_FAMS * NUM_QUALS, t2 - t1, 
"entries/sec",
+        "Data write rate");
+    builder.info("compact", NUM_ROWS * NUM_FAMS * NUM_QUALS, t3 - t2, 
"entries/sec",
+        "Compact rate");
+    builder.info("short_times1", shortStats1, ms, "Duration of each short scan 
from first run.");
+    builder.info("short_times2", shortStats2, ms, "Duration of each short scan 
from second run.");
+    builder.result("short", shortStats2.getAverage(), ms,
+        "Average duration of short scans from second run.");
+    builder.info("long_counts", longStats, "entries read",
+        "Entries read by each long scan threads");
+    builder.info("long", longStats.getSum(), (t4 - t3), "entries/sec",
+        "Combined rate of all long scans");
     builder.parameter("short_threads", NUM_SHORT_SCANS_THREADS, "Threads used 
to run short scans.");
     builder.parameter("long_threads", NUM_LONG_SCANS,
         "Threads running long scans.  Each thread repeatedly scans entire 
table for duration of test.");
     builder.parameter("rows", NUM_ROWS, "Rows in test table");
-    builder.parameter("familes", NUM_FAMS, "Families per row in test table");
+    builder.parameter("families", NUM_FAMS, "Families per row in test table");
     builder.parameter("qualifiers", NUM_QUALS, "Qualifiers per family in test 
table");
     builder.parameter("server_scan_threads", SCAN_EXECUTOR_THREADS,
         "Server side scan handler threads");
diff --git a/src/main/java/org/apache/accumulo/testing/performance/tests/ScanFewFamiliesPT.java b/src/main/java/org/apache/accumulo/testing/performance/tests/ScanFewFamiliesPT.java
index 4278b2c..5a1f59f 100644
--- a/src/main/java/org/apache/accumulo/testing/performance/tests/ScanFewFamiliesPT.java
+++ b/src/main/java/org/apache/accumulo/testing/performance/tests/ScanFewFamiliesPT.java
@@ -21,6 +21,7 @@ import java.util.HashSet;
 import java.util.LongSummaryStatistics;
 import java.util.Random;
 import java.util.Set;
+import java.util.concurrent.TimeUnit;
 
 import org.apache.accumulo.core.client.AccumuloClient;
 import org.apache.accumulo.core.client.Scanner;
@@ -38,7 +39,7 @@ import com.google.common.collect.Iterables;
 
 public class ScanFewFamiliesPT implements PerformanceTest {
 
-  private static final String DESC = "This test times fetching a few column famlies when rows have many column families.";
+  private static final String DESC = "This test times fetching a few column families when rows have many column families.";
 
   private static final int NUM_ROWS = 500;
   private static final int NUM_FAMS = 10000;
@@ -66,21 +67,25 @@ public class ScanFewFamiliesPT implements PerformanceTest {
 
     Report.Builder builder = Report.builder();
 
+    final String ms = TimeUnit.MILLISECONDS.toString();
+
     for (int numFams : new int[] {1, 2, 4, 8, 16}) {
       LongSummaryStatistics stats = runScans(env, tableName, numFams);
       String fams = Strings.padStart(numFams + "", 2, '0');
-      builder.info("f" + fams + "_stats", stats,
-          "Times in ms to fetch " + numFams + " families from all rows");
-      builder.result("f" + fams, stats.getAverage(),
-          "Average time in ms to fetch " + numFams + " families from all 
rows");
+      builder.info("f" + fams + "_stats", stats, ms,
+          "Time to fetch " + numFams + " families from all rows");
+      builder.result("f" + fams, stats.getAverage(), ms,
+          "Average time to fetch " + numFams + " families from all rows");
     }
 
     builder.id("sfewfam");
     builder.description(DESC);
-    builder.info("write", NUM_ROWS * NUM_FAMS * NUM_QUALS, t2 - t1, "Data 
write rate entries/sec ");
-    builder.info("compact", NUM_ROWS * NUM_FAMS * NUM_QUALS, t3 - t2, "Compact 
rate entries/sec ");
+    builder.info("write", NUM_ROWS * NUM_FAMS * NUM_QUALS, t2 - t1, 
"entries/sec",
+        "Data write rate");
+    builder.info("compact", NUM_ROWS * NUM_FAMS * NUM_QUALS, t3 - t2, 
"entries/sec",
+        "Compact rate");
     builder.parameter("rows", NUM_ROWS, "Rows in test table");
-    builder.parameter("familes", NUM_FAMS, "Families per row in test table");
+    builder.parameter("families", NUM_FAMS, "Families per row in test table");
     builder.parameter("qualifiers", NUM_QUALS, "Qualifiers per family in test 
table");
 
     return builder.build();
diff --git a/src/main/java/org/apache/accumulo/testing/performance/tests/SplitBalancingPT.java b/src/main/java/org/apache/accumulo/testing/performance/tests/SplitBalancingPT.java
index e36c62e..094fbc2 100644
--- a/src/main/java/org/apache/accumulo/testing/performance/tests/SplitBalancingPT.java
+++ b/src/main/java/org/apache/accumulo/testing/performance/tests/SplitBalancingPT.java
@@ -80,7 +80,7 @@ public class SplitBalancingPT implements PerformanceTest {
       boolean balanced = count >= min && count <= max;
       allServersBalanced = allServersBalanced & balanced;
 
-      reportBuilder.result("size_tserver_" + tabletServer, count,
+      reportBuilder.result("size_tserver_" + tabletServer, count, "tablet 
count",
           "Total tablets assigned to tablet server " + tabletServer);
     }
 
diff --git a/src/main/java/org/apache/accumulo/testing/performance/tests/TableDeletionDuringSplitPT.java b/src/main/java/org/apache/accumulo/testing/performance/tests/TableDeletionDuringSplitPT.java
index 303f108..de603e7 100644
--- a/src/main/java/org/apache/accumulo/testing/performance/tests/TableDeletionDuringSplitPT.java
+++ b/src/main/java/org/apache/accumulo/testing/performance/tests/TableDeletionDuringSplitPT.java
@@ -27,6 +27,7 @@ import java.util.concurrent.ExecutionException;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.LongAdder;
 
@@ -112,18 +113,18 @@ public class TableDeletionDuringSplitPT implements PerformanceTest {
 
     List<Runnable> queued = pool.shutdownNow();
 
-    reportBuilder.result("remaining_pending_tasks", countRemaining(iter),
+    reportBuilder.result("remaining_pending_tasks", countRemaining(iter), 
"task count",
         "The number of remaining pending tasks.");
-    reportBuilder.result("remaining_submitted_tasks", queued.size(),
+    reportBuilder.result("remaining_submitted_tasks", queued.size(), "task 
count",
         "The number of remaining submitted tasks.");
 
     long totalRemainingTables = Arrays.stream(tableNames)
         .filter((name) -> client.tableOperations().exists(name)).count();
-    reportBuilder.result("total_remaining_tables", totalRemainingTables,
+    reportBuilder.result("total_remaining_tables", totalRemainingTables, 
"table count",
         "The total number of unsuccessfully deleted tables.");
     Long deletionTime = deletionTimes.sum() / deletedTables.get();
-    reportBuilder.result("avg_deletion_time", deletionTime,
-        "The average deletion time (in ms) to delete a table.");
+    reportBuilder.result("avg_deletion_time", deletionTime, 
TimeUnit.NANOSECONDS.toString(),
+        "The average time taken to delete a table.");
   }
 
   private List<Runnable> getTasks(final String[] tableNames, final 
AccumuloClient client,
diff --git a/src/main/java/org/apache/accumulo/testing/performance/tests/YieldingScanExecutorPT.java b/src/main/java/org/apache/accumulo/testing/performance/tests/YieldingScanExecutorPT.java
index 8fe424f..310c971 100644
--- a/src/main/java/org/apache/accumulo/testing/performance/tests/YieldingScanExecutorPT.java
+++ b/src/main/java/org/apache/accumulo/testing/performance/tests/YieldingScanExecutorPT.java
@@ -24,6 +24,7 @@ import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Random;
 import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
 
 import org.apache.accumulo.core.client.AccumuloClient;
@@ -91,7 +92,7 @@ public class YieldingScanExecutorPT implements PerformanceTest {
     String tableName = "scept";
 
     Map<String,String> props = new HashMap<>();
-    // set up a scan dispatcher that send long runnning scans (> 500ms) to the second executor
+    // set up a scan dispatcher that send long-running scans (> 500ms) to the second executor
     props.put(Property.TABLE_SCAN_DISPATCHER.getKey(), TimedScanDispatcher.class.getName());
     props.put(Property.TABLE_SCAN_DISPATCHER_OPTS.getKey() + "quick.executor", "se1");
     props.put(Property.TABLE_SCAN_DISPATCHER_OPTS.getKey() + "quick.time.ms", QUICK_SCAN_TIME);
@@ -122,29 +123,34 @@ public class YieldingScanExecutorPT implements PerformanceTest {
 
     Report.Builder builder = Report.builder();
 
+    final String ms = TimeUnit.MILLISECONDS.toString();
+
     builder.id("yfexec").description(TEST_DESC);
-    builder.info("write", NUM_ROWS * NUM_FAMS * NUM_QUALS, t2 - t1, "Data 
write rate entries/sec ");
-    builder.info("compact", NUM_ROWS * NUM_FAMS * NUM_QUALS, t3 - t2, "Compact 
rate entries/sec ");
-    builder.info("short_times1", shortStats1, "Times in ms for each short 
scan.  First run.");
-    builder.info("short_times2", shortStats2, "Times in ms for each short 
scan. Second run.");
-    builder.result("short", shortStats2.getAverage(),
-        "Average times in ms for short scans from 2nd run.");
-    builder.info("long_counts", longStats, "Entries read by each of the filter 
threads");
-    builder.info("long", longStats.getSum(), (t4 - t3),
-        "Combined rate in entries/second of all long scans.  This should be 
low but non-zero.");
+    builder.info("write", NUM_ROWS * NUM_FAMS * NUM_QUALS, t2 - t1, 
"entries/sec",
+        "Data write rate");
+    builder.info("compact", NUM_ROWS * NUM_FAMS * NUM_QUALS, t3 - t2, 
"entries/sec",
+        "Compact rate");
+    builder.info("short_times1", shortStats1, ms, "Duration of each short scan 
from first run.");
+    builder.info("short_times2", shortStats2, ms, "Duration of each short scan 
from second run.");
+    builder.result("short", shortStats2.getAverage(), ms,
+        "Average duration of short scans from second run.");
+    builder.info("long_counts", longStats, "entry count",
+        "Entries read by each of the filter threads");
+    builder.info("long", longStats.getSum(), (t4 - t3), "entries/sec",
+        "Combined rate of all long scans. This should be low but non-zero.");
     builder.parameter("short_threads", NUM_SHORT_SCANS_THREADS, "Threads used 
to run short scans.");
     builder.parameter("long_threads", NUM_LONG_SCANS,
-        "Threads running long fileter scans.  Each thread repeatedly scans 
entire table for "
+        "Threads running long filter scans.  Each thread repeatedly scans 
entire table for "
             + "duration of test randomly returning a few of the keys.");
     builder.parameter("rows", NUM_ROWS, "Rows in test table");
-    builder.parameter("familes", NUM_FAMS, "Families per row in test table");
+    builder.parameter("families", NUM_FAMS, "Families per row in test table");
     builder.parameter("qualifiers", NUM_QUALS, "Qualifiers per family in test 
table");
     builder.parameter("server_scan_threads", SCAN_EXECUTOR_THREADS,
-        "Server side scan handler threads that each executor has.  There are 2 
executors.");
+        "Server side scan handler threads that each executor has. There are 2 
executors.");
 
     builder.parameter("filter_probabilities", FILTER_PROBABILITIES,
         "The chances that one of the long "
-            + "filter scans will return any key it sees. The probabilites are 
cycled through when "
+            + "filter scans will return any key it sees. The probabilities are 
cycled through when "
             + "starting long scans.");
     builder.parameter("filter_yield_time", FILTER_YIELD_TIME,
         "The time in ms after which one of " + "the long filter scans will 
yield.");
