Hello Stefan,

Am 26.05.2018 um 04:34 schrieb Stefan Brüns:
> To avoid large number of allocations for common simple polygons,
> boost::container::small_vector<4, T> is used, which stores up to
> 4 intersections inline. small_vector is a header-only class.

Even though it is header-only, it still means that Boost becomes a new
build-time dependency. Hence CMakeLists.txt also needs to be adjusted
to find the Boost headers in possibly non-standard paths and to give a
configure-time error message if the necessary Boost headers are not
present.
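For illustration, a minimal sketch of the kind of check I have in mind
(the version number is a placeholder, not something your patch requires):

  # Hypothetical addition to the top-level CMakeLists.txt: fail at
  # configure time if the header-only Boost headers cannot be found.
  find_package(Boost 1.58)
  if(NOT Boost_FOUND)
    message(FATAL_ERROR "Boost headers are required but were not found.")
  endif()
  include_directories(SYSTEM ${Boost_INCLUDE_DIRS})

Using find_package(Boost ... REQUIRED) would achieve the same
configure-time failure with less code.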

Personally, I would very much like Boost to become a build-time
dependency, since it contains many useful primitives and utilities. But
I guess it would be best to reduce your XPathScanner patch to something
purely based on the standard library - even if it allocates some more -
and to tackle the use of small_vector in a separate follow-up patch.

Also, maybe you would like to run this on a larger collection of
documents to get a more statistical view of the performance
improvements? Possibly, the attached perftest scripts help with that,
e.g. via invocations along the lines sketched below.
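
For example, something along these lines (the paths and result file
names are placeholders; one driver would be built from master, the
other from a tree with your patch applied):

  ./poppler-perftest measure --driver ./driver-master /path/to/pdfs before.results
  ./poppler-perftest measure --driver ./driver-patched /path/to/pdfs after.results
  ./poppler-perftest compare after.results before.results

The compare step then lists the per-page deviations above the given
threshold as well as the cumulative run time and memory usage.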

Best regards,
Adam
From 4790f36aacd9116b8596f3801a429a23508ef41a Mon Sep 17 00:00:00 2001
From: Adam Reichold <[email protected]>
Date: Tue, 20 Feb 2018 08:41:04 +0100
Subject: [PATCH] Add simple Python script to measure and compare rendering
 performance of Poppler builds.

---
 CMakeLists.txt            |   1 +
 perftest/CMakeLists.txt   |   4 +
 perftest/compare.py       | 114 +++++++++++++++
 perftest/driver.cc        | 291 ++++++++++++++++++++++++++++++++++++++
 perftest/measure.py       | 120 ++++++++++++++++
 perftest/poppler-perftest |  40 ++++++
 perftest/util.py          |  20 +++
 7 files changed, 590 insertions(+)
 create mode 100644 perftest/CMakeLists.txt
 create mode 100644 perftest/compare.py
 create mode 100644 perftest/driver.cc
 create mode 100644 perftest/measure.py
 create mode 100755 perftest/poppler-perftest
 create mode 100644 perftest/util.py

diff --git a/CMakeLists.txt b/CMakeLists.txt
index b5a86a5f..a1ce63ea 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -667,6 +667,7 @@ if(ENABLE_GLIB)
   add_subdirectory(glib)
 endif()
 add_subdirectory(test)
+add_subdirectory(perftest)
 if(ENABLE_QT5)
   add_subdirectory(qt5)
 endif()
diff --git a/perftest/CMakeLists.txt b/perftest/CMakeLists.txt
new file mode 100644
index 00000000..84e1e3a2
--- /dev/null
+++ b/perftest/CMakeLists.txt
@@ -0,0 +1,4 @@
+if (ENABLE_SPLASH)
+  add_executable(driver driver.cc)
+  target_link_libraries(driver poppler)
+endif ()
diff --git a/perftest/compare.py b/perftest/compare.py
new file mode 100644
index 00000000..4bf8082a
--- /dev/null
+++ b/perftest/compare.py
@@ -0,0 +1,114 @@
+import pickle
+import zlib
+
+from util import reference, reldev, maxabs
+
+def collect_stats3a(stats, entry):
+    if stats is None:
+        stats = (0, 0)
+
+    sum, acc = stats
+    mean, stdev = entry
+    stats = (sum + mean, acc + abs(stdev / mean))
+
+    return stats
+
+def collect_stats3b(stats, entry):
+    if stats is None:
+        stats = 0
+
+    stats += entry
+
+    return stats
+
+def collect_stats2(stats, entry):
+    if stats is None:
+        stats = { 'run_time': None, 'memory_usage': None }
+
+    stats['run_time'] = collect_stats3a(stats['run_time'], entry['run_time'])
+    stats['memory_usage'] = collect_stats3b(stats['memory_usage'], entry['memory_usage'])
+
+    return stats
+
+def collect_stats1(stats, entry, ref_entry):
+    if stats is None:
+        stats = { 'results': None, 'ref_results': None }
+
+    stats['results'] = collect_stats2(stats['results'], entry)
+    stats['ref_results'] = collect_stats2(stats['ref_results'], ref_entry)
+
+    return stats
+
+def print_stats(count, stats):
+    run_time_sum, ref_run_time_sum = stats['results']['run_time'][0], stats['ref_results']['run_time'][0]
+    run_time_acc, ref_run_time_acc = stats['results']['run_time'][1] / count, stats['ref_results']['run_time'][1] / count
+    run_time_reldev = reldev(run_time_sum, ref_run_time_sum)
+
+    memory_usage_sum, ref_memory_usage_sum = stats['results']['memory_usage'], stats['ref_results']['memory_usage']
+    memory_usage_reldev = reldev(memory_usage_sum, ref_memory_usage_sum)
+
+    print('\tCumulative run time:')
+    print('\t\tResult: %.2f min ± %.1f %%' % (run_time_sum * 1.0e-6 / 60.0, run_time_acc * 100.0))
+    print('\t\tReference: %.2f min ± %.1f %%' % (ref_run_time_sum * 1.0e-6 / 60.0, ref_run_time_acc * 100.0))
+    print('\t\tDeviation: %+.2f %%' % (run_time_reldev * 100.0))
+    print('\tCumulative memory usage:')
+    print('\t\tResult: %.1f MB' % (memory_usage_sum / 1024.0 / 1024.0))
+    print('\t\tReference: %.1f MB' % (ref_memory_usage_sum / 1024.0 / 1024.0))
+    print('\t\tDeviation: %+.2f %%' % (memory_usage_reldev * 100.0))
+
+def compare(args):
+    with open(args.results, 'rb') as file:
+        data = file.read()
+        data = zlib.decompress(data)
+        results = pickle.loads(data)
+    with open(args.reference_results, 'rb') as file:
+        data = file.read()
+        data = zlib.decompress(data)
+        ref_results = pickle.loads(data)
+
+    count = 0
+    stats = None
+    count_above_threshold = 0
+    stats_above_threshold = None
+
+    for document in results.keys() & ref_results.keys():
+        entries = results[document]
+        ref_entries = ref_results[document]
+
+        for page in entries.keys() & ref_entries.keys():
+            entry = entries[page]
+            ref_entry = ref_entries[page]
+
+            count += 1
+            stats = collect_stats1(stats, entry, ref_entry)
+
+            run_time_mean, run_time_stdev = entry['run_time']
+            ref_run_time_mean, ref_run_time_stdev = ref_entry['run_time']
+            run_time_reldev = reldev(run_time_mean, ref_run_time_mean)
+
+            memory_usage = entry['memory_usage']
+            ref_memory_usage = ref_entry['memory_usage']
+            memory_usage_reldev = reldev(memory_usage, ref_memory_usage)
+
+            if maxabs(run_time_reldev, memory_usage_reldev) <= args.threshold:
+                continue
+
+            count_above_threshold += 1
+            stats_above_threshold = collect_stats1(stats_above_threshold, entry, ref_entry)
+
+            print('%s:' % (reference(document, page)))
+            print('\tRun time:')
+            print('\t\tResult: %.2f ± %.3f s' % (run_time_mean * 1.0e-6, run_time_stdev * 1.0e-6))
+            print('\t\tReference: %.2f ± %.3f s' % (ref_run_time_mean * 1.0e-6, ref_run_time_stdev * 1.0e-6))
+            print('\t\tDeviation: %.1f %%' % (run_time_reldev * 100.0))
+            print('\tMemory usage:')
+            print('\t\tResult: %.1f kB' % (memory_usage / 1024.0))
+            print('\t\tReference: %.1f kB' % (ref_memory_usage / 1024.0))
+            print('\t\tDeviation: %.1f %%' % (memory_usage_reldev * 100.0))
+
+    print('%d matching result(s):' % (count))
+    print_stats(count, stats)
+
+    if count_above_threshold != 0:
+        print('%d matching result(s) above the given threshold of %.1f %%:' % (count_above_threshold, args.threshold * 100.0))
+        print_stats(count_above_threshold, stats_above_threshold)
diff --git a/perftest/driver.cc b/perftest/driver.cc
new file mode 100644
index 00000000..f031f697
--- /dev/null
+++ b/perftest/driver.cc
@@ -0,0 +1,291 @@
+#include <string>
+#include <memory>
+#include <functional>
+#include <vector>
+#include <iostream>
+#include <algorithm>
+#include <numeric>
+
+#include <cstdlib>
+#include <cmath>
+
+#include <time.h>
+
+#include "PDFDoc.h"
+#include "SplashOutputDev.h"
+#include "splash/SplashBitmap.h"
+#include "TextOutputDev.h"
+
+namespace
+{
+
+std::unique_ptr<PDFDoc> openDocument(const char *filePath) {
+  std::unique_ptr<PDFDoc> document{
+    new PDFDoc(new GooString(filePath), nullptr, nullptr, nullptr)
+  };
+
+  if (!document->isOk()) {
+    document.reset();
+  }
+
+  return document;
+}
+
+std::unique_ptr<SplashOutputDev> openSplashDevice(PDFDoc *document, SplashColorPtr paperColor) {
+  std::unique_ptr<SplashOutputDev> device{
+    new SplashOutputDev(splashModeXBGR8, 4, gFalse, paperColor)
+  };
+
+  if (device) {
+    device->startDoc(document);
+  }
+
+  return device;
+}
+
+void displayPageUsingSplash(PDFDoc *document, SplashOutputDev *device, int page, double resolution) {
+  document->displayPage(
+	device,
+	page,
+	resolution, resolution, 0,
+	gFalse, gFalse, gFalse
+  );
+
+  delete device->takeBitmap();
+}
+
+bool renderAllPagesUsingSplash(PDFDoc *document, SplashColorPtr paperColor, double resolution) {
+  const auto device = openSplashDevice(document, paperColor);
+
+  if (!device) {
+    return false;
+  }
+
+  for (int page = 1, pageCount = document->getNumPages(); page <= pageCount; ++page) {
+    displayPageUsingSplash(document, device.get(), page, resolution);
+  }
+
+  return true;
+}
+
+bool renderPageUsingSplash(PDFDoc *document, int page, SplashColorPtr paperColor, double resolution) {
+  const auto device = openSplashDevice(document, paperColor);
+
+  if (!device) {
+    return false;
+  }
+
+  displayPageUsingSplash(document, device.get(), page, resolution);
+
+  delete device->takeBitmap();
+
+  return true;
+}
+
+std::unique_ptr<TextOutputDev> openTextDevice() {
+  std::unique_ptr<TextOutputDev> device{
+    new TextOutputDev(nullptr, gTrue, 0.0, gFalse, gFalse)
+  };
+
+  if (!device->isOk()) {
+      device.reset();
+  }
+
+  return device;
+}
+
+void displayPageAsText(PDFDoc *document, TextOutputDev *device, int page) {
+  document->displayPage(
+	device,
+	page,
+	72.0, 72.0, 0,
+	gFalse, gFalse, gFalse
+  );
+
+  delete device->makeWordList();
+}
+
+bool renderAllPagesAsText(PDFDoc *document) {
+  const auto device = openTextDevice();
+
+  if (!device) {
+    return false;
+  }
+
+  for (int page = 1, pageCount = document->getNumPages(); page <= pageCount; ++page) {
+    displayPageAsText(document, device.get(), page);
+  }
+
+  return true;
+}
+
+bool renderPageAsText(PDFDoc *document, int page) {
+  const auto device = openTextDevice();
+
+  if (!device) {
+    return false;
+  }
+
+  displayPageAsText(document, device.get(), page);
+
+  return true;
+}
+
+double compute_accuracy(const std::vector<double>& values) {
+  if (values.size() < 2) {
+    return std::numeric_limits<double>::max();
+  }
+
+  const auto sum = std::accumulate(values.begin(), values.end(), 0.0);
+  const auto mean = sum / values.size();
+
+  const auto variance = std::accumulate(
+    values.begin(), values.end(), 0.0,
+    [mean](double variance, double value) {
+      return variance + (value - mean) * (value - mean);
+    }
+  );
+  const auto stdev = std::sqrt(variance / (values.size() - 1));
+
+  return std::abs(stdev / mean);
+}
+
+bool check_page_count(const char* filePath) {
+  const auto document = openDocument(filePath);
+
+  if (!document) {
+    return false;
+  }
+
+  std::cout << document->getNumPages();
+
+  return true;
+}
+
+bool measure_action(const std::function<bool()>& action, int warmUpIterations, int minIterations, int maxIterations, double targetAccuracy) {
+  std::vector<double> runtimes;
+  runtimes.reserve(maxIterations);
+
+  for (int iteration = 1; iteration <= warmUpIterations; ++iteration) {
+    action();
+  }
+
+  for (int iteration = 1; iteration <= maxIterations; ++iteration) {
+    struct timespec before;
+    ::clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &before);
+
+    if (!action()) {
+      return false;
+    }
+
+    struct timespec after;
+    ::clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &after);
+
+    runtimes.emplace_back(
+	  (after.tv_sec - before.tv_sec) * 1.0e+6 +
+	  (after.tv_nsec - before.tv_nsec) * 1.0e-3
+    );
+
+    if (iteration >= minIterations && compute_accuracy(runtimes) <= targetAccuracy) {
+      break;
+    }
+  }
+
+  std::cout << "[";
+
+  for (auto runtime = runtimes.begin(); runtime != runtimes.end(); ++runtime) {
+    if (runtime != runtimes.begin()) {
+      std::cout << ',';
+    }
+    std::cout << *runtime;
+  }
+
+  std::cout << "]";
+
+  return true;
+}
+
+}
+
+int main(int argc, char** argv) {
+  std::unique_ptr<GlobalParams> globalParams{
+    new GlobalParams()
+  };
+
+  if (!globalParams) {
+    return EXIT_FAILURE;
+  }
+
+  ::globalParams = globalParams.get();
+
+  if (argc == 2) {
+    return check_page_count(argv[1]) ? EXIT_SUCCESS : EXIT_FAILURE;
+  }
+  else if (argc != 8) {
+    return EXIT_FAILURE;
+  }
+
+  const auto warmUpIterations = std::atoi(argv[1]);
+  const auto minIterations = std::atoi(argv[2]);
+  const auto maxIterations = std::atoi(argv[3]);
+  const auto targetAccuracy = std::atof(argv[4]);
+  const auto mode = std::string{argv[5]};
+  const auto filePath = argv[6];
+  const auto page = std::atoi(argv[7]);
+
+  if (warmUpIterations < 1 || minIterations < 1 || maxIterations < 1 || minIterations > maxIterations) {
+    return EXIT_FAILURE;
+  }
+
+  if (mode != "splash" && mode != "text") {
+    return EXIT_FAILURE;
+  }
+
+  SplashColor paperColor;
+  paperColor[0] = 0xFF;
+  paperColor[1] = 0xFF;
+  paperColor[2] = 0xFF;
+
+  const auto resolution = 72.0;
+
+  const auto document = openDocument(filePath);
+
+  if (!document) {
+    return EXIT_FAILURE;
+  }
+
+  const auto pageCount = document->getNumPages();
+
+  if (pageCount < 1 || pageCount < page) {
+    return EXIT_FAILURE;
+  }
+
+  std::function<bool()> action;
+
+  if (mode == "splash" && page < 1) {
+    action = [&document, &paperColor, resolution]() {
+      return renderAllPagesUsingSplash(document.get(), paperColor, resolution);
+    };
+  }
+  else if (mode == "splash") {
+    action = [&document, page, &paperColor, resolution]() {
+      return renderPageUsingSplash(document.get(), page, paperColor, resolution);
+    };
+  }
+  else if (mode == "text" && page < 1) {
+    action = [&document]() {
+      return renderAllPagesAsText(document.get());
+    };
+  }
+  else if (mode == "text") {
+    action = [&document, page]() {
+      return renderPageAsText(document.get(), page);
+    };
+  }
+
+  if (!action) {
+    return EXIT_FAILURE;
+  }
+
+  return measure_action(action, warmUpIterations, minIterations, maxIterations, targetAccuracy) ? EXIT_SUCCESS : EXIT_FAILURE;
+}
diff --git a/perftest/measure.py b/perftest/measure.py
new file mode 100644
index 00000000..7d0f6202
--- /dev/null
+++ b/perftest/measure.py
@@ -0,0 +1,120 @@
+import json
+import multiprocessing
+import os
+import pickle
+import statistics
+import subprocess
+import sys
+import time
+import zlib
+
+from util import rewrite, reference
+
+def check_page_count(document):
+    global driver
+
+    try:
+        return int(subprocess.check_output([ driver, document ], stderr = subprocess.DEVNULL))
+    except (subprocess.CalledProcessError, ValueError):
+        return 0
+
+def measure_command(command):
+    try:
+        process = subprocess.Popen(command, stdout = subprocess.PIPE, stderr = subprocess.DEVNULL)
+
+        _, status, resources = os.wait4(process.pid, 0)
+
+        if not os.WIFEXITED(status) or os.WEXITSTATUS(status) != 0:
+            return ' '.join(command)
+
+        run_times = json.load(process.stdout)
+
+        run_time = (statistics.mean(run_times), statistics.stdev(run_times))
+        memory_usage = resources.ru_maxrss
+
+        return (run_time, memory_usage)
+    except:
+        return sys.exc_info()[0]
+
+def measure_task(task):
+    global driver, mode, warm_up_iterations, min_iterations, max_iterations, target_accuracy
+
+    document, page = task
+
+    command = [
+        driver,
+        str(warm_up_iterations), str(min_iterations), str(max_iterations),
+        str(target_accuracy),
+        mode,
+        document,
+        str(page) if page else '0'
+    ]
+
+    return (document, page, measure_command(command))
+
+def measure(args):
+    global driver, mode, warm_up_iterations, min_iterations, max_iterations, target_accuracy
+
+    driver = args.driver
+    mode = args.mode
+    warm_up_iterations = args.warm_up_iterations
+    min_iterations = args.min_iterations
+    max_iterations = args.max_iterations
+    target_accuracy = args.target_accuracy
+
+    todo = 0
+
+    written = rewrite(0, 'Scanning...')
+
+    tasks = []
+
+    for path, _, files in os.walk(args.documents):
+        for file in files:
+            file_path = os.path.join(path, file)
+
+            page_count = check_page_count(file_path)
+
+            if page_count == 0:
+                continue
+
+            pages = range(1, page_count + 1) if args.pages else [ None ]
+
+            for page in pages:
+                tasks.append((file_path, page))
+
+                todo += 1
+                if todo % 100 == 0:
+                    written = rewrite(written, 'Found %d...' % todo)
+
+    done = 0
+
+    written = rewrite(written, '%d/%d (%.1f%%): Measuring...' % (done, todo, 0))
+    begin = time.time()
+
+    with multiprocessing.Pool() as pool:
+        results = {}
+
+        for result in pool.imap(measure_task, tasks):
+            document, page, measurement = result
+
+            try:
+                run_time, memory_usage = measurement
+            except:
+                rewrite(written, 'Measurement failed: %s\n' % (measurement))
+                sys.exit(1)
+
+            entry = results.setdefault(document, {}).setdefault(page, {})
+            entry['run_time'] = run_time
+            entry['memory_usage'] = memory_usage
+
+            done += 1
+            if done % max(1, todo // 500) == 0:
+                written = rewrite(written, '%d/%d (%.1f%%): Measured %s...' % (done, todo, 100 * done / todo, reference(document, page)))
+
+    end = time.time()
+    rewrite(written, '%d/%d (%.1f%%): Measurement took %s.\n' % (done, todo, 100, time.strftime('%T', time.gmtime(end - begin))))
+
+    with open(args.results, 'wb') as file:
+        data = pickle.dumps(results)
+        data = zlib.compress(data)
+        file.write(data)
diff --git a/perftest/poppler-perftest b/perftest/poppler-perftest
new file mode 100755
index 00000000..517ba48c
--- /dev/null
+++ b/perftest/poppler-perftest
@@ -0,0 +1,40 @@
+#!/usr/bin/env python3
+
+import argparse
+import sys
+
+from measure import measure
+from compare import compare
+
+def main(args):
+    parser = argparse.ArgumentParser()
+    subparsers = parser.add_subparsers()
+
+    measure_parser = subparsers.add_parser('measure')
+    measure_parser.set_defaults(func=measure)
+    measure_parser.add_argument('--driver', default='./driver')
+    measure_parser.add_argument('--mode', choices=[ 'splash', 'text' ], default='splash')
+    measure_parser.add_argument('--pages', action='store_true')
+    measure_parser.add_argument('--warm_up_iterations', type=int, default=5)
+    measure_parser.add_argument('--min_iterations', type=int, default=5)
+    measure_parser.add_argument('--max_iterations', type=int, default=25)
+    measure_parser.add_argument('--target_accuracy', type=float, default=0.01)
+    measure_parser.add_argument('documents')
+    measure_parser.add_argument('results')
+
+    compare_parser = subparsers.add_parser('compare')
+    compare_parser.set_defaults(func=compare)
+    compare_parser.add_argument('--threshold', type=float, default=0.05)
+    compare_parser.add_argument('results')
+    compare_parser.add_argument('reference_results')
+
+    try:
+        args = parser.parse_args(args)
+        args.func(args)
+        return 0
+    except AttributeError:
+        parser.print_help()
+        return 1
+
+if __name__ == '__main__':
+    sys.exit(main(sys.argv[1:]))
diff --git a/perftest/util.py b/perftest/util.py
new file mode 100644
index 00000000..2d14a37f
--- /dev/null
+++ b/perftest/util.py
@@ -0,0 +1,20 @@
+import math
+import sys
+
+def rewrite(written, message):
+    sys.stdout.write('\r' * written + ' ' * written + '\r')
+    written = sys.stdout.write(message)
+    sys.stdout.flush()
+    return written
+
+def reference(document, page):
+    if page is not None:
+        return '%s[%d]' % (document, page)
+    else:
+        return document
+
+def reldev(x, y):
+    return (x - y) / y
+
+def maxabs(x, y):
+    return max(abs(x), abs(y))
-- 
2.17.0
