EBernhardson has uploaded a new change for review. ( 
https://gerrit.wikimedia.org/r/370986 )

Change subject: [WIP] degrade queries based on latency percentiles
......................................................................

[WIP] degrade queries based on latency percentiles

TODO:
* Latency buckets based on query stat groups
* Background update thread for load stats, like histogram rotation
* Expose latency in parsing of DegradedCondition
* Tests
* Docs

Change-Id: I70ae1b148e1f15ea77f2fec5315c47faaaa7b3dc
---
M src/main/java/org/wikimedia/search/extra/ExtraPlugin.java
M 
src/main/java/org/wikimedia/search/extra/router/DegradedRouterQueryBuilder.java
A src/main/java/org/wikimedia/search/extra/router/monitor/LoadService.java
A src/main/java/org/wikimedia/search/extra/router/monitor/LoadStats.java
A 
src/main/java/org/wikimedia/search/extra/router/monitor/SearchLatencyListener.java
M 
src/test/java/org/wikimedia/search/extra/router/DegradedRouterBuilderESTest.java
M 
src/test/java/org/wikimedia/search/extra/router/DegradedRouterQueryBuilderParserTest.java
M 
src/test/java/org/wikimedia/search/extra/router/TokenCountRouterParserTest.java
8 files changed, 357 insertions(+), 55 deletions(-)


  git pull ssh://gerrit.wikimedia.org:29418/search/extra 
refs/changes/86/370986/1

diff --git a/src/main/java/org/wikimedia/search/extra/ExtraPlugin.java 
b/src/main/java/org/wikimedia/search/extra/ExtraPlugin.java
index 5eeaf3a..062c00e 100644
--- a/src/main/java/org/wikimedia/search/extra/ExtraPlugin.java
+++ b/src/main/java/org/wikimedia/search/extra/ExtraPlugin.java
@@ -1,19 +1,29 @@
 package org.wikimedia.search.extra;
 
+import org.elasticsearch.client.Client;
+import org.elasticsearch.cluster.service.ClusterService;
+import org.elasticsearch.common.component.LifecycleComponent;
+import org.elasticsearch.common.settings.Setting;
 import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.NamedXContentRegistry;
 import org.elasticsearch.index.IndexModule;
 import org.elasticsearch.index.analysis.TokenFilterFactory;
 import org.elasticsearch.indices.analysis.AnalysisModule.AnalysisProvider;
-import org.elasticsearch.monitor.os.OsService;
+import org.elasticsearch.monitor.os.OsProbe;
 import org.elasticsearch.plugins.AnalysisPlugin;
 import org.elasticsearch.plugins.Plugin;
 import org.elasticsearch.plugins.ScriptPlugin;
 import org.elasticsearch.plugins.SearchPlugin;
 import org.elasticsearch.script.NativeScriptFactory;
+import org.elasticsearch.script.ScriptService;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.watcher.ResourceWatcherService;
 import 
org.wikimedia.search.extra.analysis.filters.PreserveOriginalFilterFactory;
 import org.wikimedia.search.extra.fuzzylike.FuzzyLikeThisQueryBuilder;
 import org.wikimedia.search.extra.levenshtein.LevenshteinDistanceScoreBuilder;
 import org.wikimedia.search.extra.regex.SourceRegexQueryBuilder;
+import org.wikimedia.search.extra.router.monitor.LoadService;
+import org.wikimedia.search.extra.router.monitor.SearchLatencyListener;
 import org.wikimedia.search.extra.superdetectnoop.ChangeHandler;
 import org.wikimedia.search.extra.superdetectnoop.SetHandler;
 import org.wikimedia.search.extra.superdetectnoop.SuperDetectNoopScript;
@@ -23,24 +33,34 @@
 import org.wikimedia.search.extra.router.DegradedRouterQueryBuilder;
 import org.wikimedia.search.extra.router.TokenCountRouterQueryBuilder;
 
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
+import java.util.*;
 
 /**
  * Setup the Elasticsearch plugin.
  */
 public class ExtraPlugin extends Plugin implements SearchPlugin, 
AnalysisPlugin, ScriptPlugin {
 
-    private OsService osService;
+    private final SearchLatencyListener latencyListener;
+    private final LoadService loadService;
 
     public ExtraPlugin(Settings settings) {
-        // TODO: This collects way more info than we care about
-        osService = new OsService(settings);
+        latencyListener = new SearchLatencyListener(settings, 5);
+        loadService = new LoadService(settings, latencyListener, 
OsProbe.getInstance());
+    }
+
+    @Override
+    public List<Setting<?>> getSettings() {
+        return Arrays.asList(
+            LoadService.REFRESH_INTERVAL_SETTING,
+            SearchLatencyListener.NUM_LATENCY_BUCKETS,
+            SearchLatencyListener.ROTATION_DELAY);
+    }
+
+    @Override
+    public Collection<Object> createComponents(Client client, ClusterService 
clusterService, ThreadPool threadPool,
+                                               ResourceWatcherService 
resourceWatcherService, ScriptService scriptService,
+                                               NamedXContentRegistry 
xContentRegistry) {
+        return Collections.singletonList(loadService);
     }
 
     /**
@@ -53,7 +73,7 @@
                 new QuerySpec<>(SourceRegexQueryBuilder.NAME, 
SourceRegexQueryBuilder::new, SourceRegexQueryBuilder::fromXContent),
                 new QuerySpec<>(FuzzyLikeThisQueryBuilder.NAME, 
FuzzyLikeThisQueryBuilder::new, FuzzyLikeThisQueryBuilder::fromXContent),
                 new QuerySpec<>(TokenCountRouterQueryBuilder.NAME, 
TokenCountRouterQueryBuilder::new, TokenCountRouterQueryBuilder::fromXContent),
-                new QuerySpec<>(DegradedRouterQueryBuilder.NAME, (in) -> new 
DegradedRouterQueryBuilder(in, osService), (pc) -> 
DegradedRouterQueryBuilder.fromXContent(pc, osService))
+                new QuerySpec<>(DegradedRouterQueryBuilder.NAME, (in) -> new 
DegradedRouterQueryBuilder(in, loadService), (pc) -> 
DegradedRouterQueryBuilder.fromXContent(pc, loadService))
         );
     }
 
@@ -87,4 +107,9 @@
             )
         );
     }
+
+    @Override
+    public void onIndexModule(IndexModule indexModule) {
+        indexModule.addSearchOperationListener(latencyListener);
+    }
 }
diff --git 
a/src/main/java/org/wikimedia/search/extra/router/DegradedRouterQueryBuilder.java
 
b/src/main/java/org/wikimedia/search/extra/router/DegradedRouterQueryBuilder.java
index fc01930..b723aec 100644
--- 
a/src/main/java/org/wikimedia/search/extra/router/DegradedRouterQueryBuilder.java
+++ 
b/src/main/java/org/wikimedia/search/extra/router/DegradedRouterQueryBuilder.java
@@ -15,11 +15,11 @@
 import org.elasticsearch.index.query.QueryBuilder;
 import org.elasticsearch.index.query.QueryParseContext;
 import org.elasticsearch.index.query.QueryRewriteContext;
-import org.elasticsearch.monitor.os.OsService;
-import org.elasticsearch.monitor.os.OsStats;
 
 import org.wikimedia.search.extra.router.AbstractRouterQueryBuilder.Condition;
 import 
org.wikimedia.search.extra.router.DegradedRouterQueryBuilder.DegradedCondition;
+import org.wikimedia.search.extra.router.monitor.LoadService;
+import org.wikimedia.search.extra.router.monitor.LoadStats;
 
 import java.io.IOException;
 import java.util.Objects;
@@ -51,15 +51,16 @@
 
     // This is intentionally not considered in doEquals, because it makes
     // testing a pain. Maybe not a great idea though.
-    private OsService osService;
 
     DegradedRouterQueryBuilder() {
         super();
     }
 
-    public DegradedRouterQueryBuilder(StreamInput in, OsService osService) 
throws IOException {
+    LoadService loadService;
+
+    public DegradedRouterQueryBuilder(StreamInput in, LoadService loadService) 
throws IOException {
         super(in, DegradedCondition::new);
-        this.osService = osService;
+        this.loadService = loadService;
     }
 
     private static DegradedCondition parseCondition(XContentParser parser, 
QueryParseContext parseContext) throws IOException {
@@ -71,24 +72,22 @@
         return NAME.getPreferredName();
     }
 
-    public static Optional<DegradedRouterQueryBuilder> 
fromXContent(QueryParseContext parseContext, OsService osService) throws 
IOException {
+    public static Optional<DegradedRouterQueryBuilder> 
fromXContent(QueryParseContext parseContext, LoadService loadService) throws 
IOException {
         final Optional<DegradedRouterQueryBuilder> builder = 
AbstractRouterQueryBuilder.fromXContent(PARSER, parseContext);
-        builder.ifPresent((b) -> b.osService = osService);
+        // TODO: I wish this could be done in the constructor so that
+        // construction guarantees a good object... but the parser is
+        // initialized statically.
+        builder.ifPresent((b) -> b.loadService = loadService);
         return builder;
     }
 
     @Override
     public QueryBuilder doRewrite(QueryRewriteContext context) throws 
IOException {
-        // TODO: osService is maybe not the best source of data. The stats 
method
-        // is syncronized, which seems undesirable for the use case of node 
queries
-        // which may happen 100 times a second. Maybe implement something
-        // with volatile, or ReadWriteLock?
-        OsStats.Cpu cpu = osService.stats().getCpu();
-        return super.doRewrite(c -> c.test(cpu));
+        LoadStats stats = loadService.stats();
+        return super.doRewrite(c -> c.test(stats));
     }
 
-    @EqualsAndHashCode(callSuper=true)
     @Getter
+    @EqualsAndHashCode(callSuper=true)
     static class DegradedCondition extends Condition {
         private final DegradedConditionType type;
 
@@ -108,8 +107,8 @@
             type.writeTo(out);
         }
 
-        public boolean test(OsStats.Cpu cpu) {
-            return test(type.extract(cpu));
+        public boolean test(LoadStats stats) {
+            return test(type.extract(stats));
         }
 
         void addXContent(XContentBuilder builder, Params params) throws 
IOException {
@@ -117,24 +116,26 @@
         }
     }
 
+    // TODO: this name is horrible
     @FunctionalInterface
-    private interface CpuStatExtractor {
-        int extract(OsStats.Cpu cpu);
+    private interface LoadStatsExtractor {
+        int extract(LoadStats stats);
     }
 
-    enum DegradedConditionType implements CpuStatExtractor, Writeable {
-        // TODO: These should both be "regularized" such that the same 
constraint is valid on machines with 2 cpus and 48 cpus
-        cpu(OsStats.Cpu::getPercent),
-        load((cpu) -> (int) Math.round(cpu.getLoadAverage()[0]));
+    enum DegradedConditionType implements LoadStatsExtractor, Writeable {
+        p95(LoadStats::getP95ms),
+        p99(LoadStats::getP99ms),
+        cpu(LoadStats::getCpuPercent),
+        load((stats) -> (int) Math.round(stats.getLoadAverage()[0]));
 
-        private CpuStatExtractor extractor;
+        private LoadStatsExtractor extractor;
 
-        DegradedConditionType(CpuStatExtractor extractor) {
+        DegradedConditionType(LoadStatsExtractor extractor) {
             this.extractor = extractor;
         }
 
-        public int extract(OsStats.Cpu cpu) {
-            return extractor.extract(cpu);
+        public int extract(LoadStats stats) {
+            return extractor.extract(stats);
         }
 
         @Override
diff --git 
a/src/main/java/org/wikimedia/search/extra/router/monitor/LoadService.java 
b/src/main/java/org/wikimedia/search/extra/router/monitor/LoadService.java
new file mode 100644
index 0000000..c3005db
--- /dev/null
+++ b/src/main/java/org/wikimedia/search/extra/router/monitor/LoadService.java
@@ -0,0 +1,72 @@
+package org.wikimedia.search.extra.router.monitor;
+
+import org.elasticsearch.common.settings.Setting;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.util.SingleObjectCache;
+import org.elasticsearch.monitor.os.OsProbe;
+import org.elasticsearch.monitor.os.OsStats;
+
+import java.util.Objects;
+
+
+public class LoadService {
+    public static final Setting<TimeValue> REFRESH_INTERVAL_SETTING =
+            Setting.timeSetting("monitor.load.refresh_interval", 
TimeValue.timeValueSeconds(20),
+                    TimeValue.timeValueSeconds(20), 
Setting.Property.NodeScope, Setting.Property.Final);
+
+    private final LoadStatsCache loadStatsCache;
+    private final LoadProbe probe;
+
+    public LoadService(Settings settings, LatencyProbe latencyProbe, OsProbe 
osProbe) {
+        this.probe = new LoadProbe(latencyProbe, osProbe);
+        TimeValue refreshInterval = REFRESH_INTERVAL_SETTING.get(settings);
+        this.loadStatsCache = new LoadStatsCache(refreshInterval, 
probe.stats());
+    }
+
+    // synchronized seems less than ideal, but would a RW Lock be any better?
+    public synchronized LoadStats stats() {
+        return loadStatsCache.getOrRefresh();
+    }
+
+    // TODO: While this is Elasticsearch's default way of handling things, I think
+    // we would prefer an async refresh and serving queries whatever is most recent,
+    // like how the histogram rotates?
+    private class LoadStatsCache extends SingleObjectCache<LoadStats> {
+        LoadStatsCache(TimeValue interval, LoadStats initValue) {
+            super(interval, initValue);
+        }
+
+        @Override
+        protected LoadStats refresh() {
+            return probe.stats();
+        }
+    }
+
+    public interface LatencyProbe {
+        double getQueryPercentile(double percentile);
+        double getQueryPercentile(double percentile, int numBuckets);
+    }
+
+    class LoadProbe {
+        LatencyProbe latencyProbe;
+        OsProbe osProbe;
+
+        LoadProbe(LatencyProbe latencyProbe, OsProbe osProbe) {
+            Objects.requireNonNull(latencyProbe);
+            Objects.requireNonNull(osProbe);
+            this.latencyProbe = latencyProbe;
+            this.osProbe = osProbe;
+        }
+
+
+        LoadStats stats() {
+            OsStats.Cpu cpu = osProbe.osStats().getCpu();
+            return new LoadStats(
+                    latencyProbe.getQueryPercentile(0.95) / 
TimeValue.NSEC_PER_MSEC,
+                    latencyProbe.getQueryPercentile(0.99) / 
TimeValue.NSEC_PER_MSEC,
+                    cpu.getPercent(),
+                    cpu.getLoadAverage());
+        }
+    }
+}
diff --git 
a/src/main/java/org/wikimedia/search/extra/router/monitor/LoadStats.java 
b/src/main/java/org/wikimedia/search/extra/router/monitor/LoadStats.java
new file mode 100644
index 0000000..1d3ec4c
--- /dev/null
+++ b/src/main/java/org/wikimedia/search/extra/router/monitor/LoadStats.java
@@ -0,0 +1,22 @@
+package org.wikimedia.search.extra.router.monitor;
+
+import lombok.Getter;
+
+@Getter
+public class LoadStats {
+    private int p95ms;
+    private int p99ms;
+    private short cpuPercent;
+    private double[] loadAverage;
+
+    public LoadStats(double p95ms, double p99ms, short cpuPercent, double[] 
loadAverage) {
+        this((int)p95ms, (int) p99ms, cpuPercent, loadAverage);
+    }
+
+    public LoadStats(int p95ms, int p99ms, short cpuPercent, double[] 
loadAverage) {
+        this.p95ms = p95ms;
+        this.p99ms = p99ms;
+        this.cpuPercent = cpuPercent;
+        this.loadAverage = loadAverage;
+    }
+}
diff --git 
a/src/main/java/org/wikimedia/search/extra/router/monitor/SearchLatencyListener.java
 
b/src/main/java/org/wikimedia/search/extra/router/monitor/SearchLatencyListener.java
new file mode 100644
index 0000000..01e2977
--- /dev/null
+++ 
b/src/main/java/org/wikimedia/search/extra/router/monitor/SearchLatencyListener.java
@@ -0,0 +1,167 @@
+package org.wikimedia.search.extra.router.monitor;
+
+import com.google.common.util.concurrent.AbstractScheduledService;
+import com.google.common.util.concurrent.AbstractScheduledService.Scheduler;
+import org.HdrHistogram.Histogram;
+import org.HdrHistogram.Recorder;
+import org.elasticsearch.common.component.AbstractLifecycleComponent;
+import org.elasticsearch.common.settings.Setting;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.index.shard.SearchOperationListener;
+import org.elasticsearch.search.internal.SearchContext;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.locks.ReadWriteLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+import java.util.function.Consumer;
+import java.util.function.ToDoubleFunction;
+
+// Current settings amount to ~ 262656 bytes. Pre-converting to ms and using
+// a single significant digit might be better. But what does that mean?
+//
+// TODO: Multiple latency buckets.
+public class SearchLatencyListener extends AbstractLifecycleComponent 
implements LoadService.LatencyProbe, SearchOperationListener {
+    public static final Setting<Integer> NUM_LATENCY_BUCKETS = 
Setting.intSetting(
+            "search.extra.latency_percentiles.num_buckets", 5, 1, 300,
+            Setting.Property.NodeScope, Setting.Property.Final);
+    public static final Setting<TimeValue> ROTATION_DELAY = 
Setting.timeSetting(
+            "search.extra.latency_percentiles.rotation_delay", 
TimeValue.timeValueMinutes(1),
+            TimeValue.timeValueMillis(100), Setting.Property.NodeScope, 
Setting.Property.Final);
+
+    private static final long HIGHEST_TRACKABLE_VALUE = 
TimeValue.timeValueMinutes(30).nanos();
+    private static final long LOWEST_DISCERNABLE_VALUE = 
TimeValue.timeValueMillis(500).nanos();
+    private static final int SIGNIFICANT_DIGITS = 2;
+
+    private final int minutesOfHistory;
+    private final Recorder recorder;
+    private final RotatingList<Histogram> list;
+
+    public SearchLatencyListener(Settings settings, int minutesOfHistory) {
+        super(settings);
+        this.minutesOfHistory = NUM_LATENCY_BUCKETS.get(settings);
+        recorder = new Recorder(LOWEST_DISCERNABLE_VALUE, 
HIGHEST_TRACKABLE_VALUE, SIGNIFICANT_DIGITS);
+        List<Histogram> items = new ArrayList<>(minutesOfHistory);
+        for (int i = 0; i < minutesOfHistory; i++) {
+            items.add(new Histogram(LOWEST_DISCERNABLE_VALUE, 
HIGHEST_TRACKABLE_VALUE, SIGNIFICANT_DIGITS));
+        }
+        TimeValue rotationDelay = ROTATION_DELAY.get(settings);
+        list = new RotatingList<>(items, recorder::getIntervalHistogramInto,
+                Scheduler.newFixedDelaySchedule(rotationDelay.millis(), 
rotationDelay.millis(), TimeUnit.MILLISECONDS));
+    }
+
+    @Override
+    protected void doStart() {
+        list.startRotating();
+    }
+
+    @Override
+    protected void doStop() {
+        list.stopRotating();
+    }
+
+    @Override
+    protected void doClose() {
+        // Should we do anything for final shutdown?
+    }
+
+    public double getQueryPercentile(double percentile) {
+        // TODO: Would a weighted average make more sense, to take recency 
into account?
+        return list.sum(h -> h.getValueAtPercentile(percentile)) / list.size();
+    }
+
+    public double getQueryPercentile(double percentile, int numBuckets) {
+        // Or throw an out of bounds exception?
+        numBuckets = Math.min(minutesOfHistory, numBuckets);
+        return list.sum(h -> h.getValueAtPercentile(percentile), numBuckets) / 
numBuckets;
+    }
+
+    public void onQueryPhase(SearchContext searchContext, long tookInNanos) {
+        recorder.recordValue(tookInNanos);
+    }
+
+    private class RotatingList<T> {
+        private final List<T> list;
+        private final Consumer<T> updater;
+        private final ReadWriteLock lock;
+        private final ScheduledProcedure scheduledProcedure;
+
+        RotatingList(List<T> list, Consumer<T> updater, Scheduler scheduler) {
+            this.list = list;
+            this.updater = updater;
+            this.lock = new ReentrantReadWriteLock();
+            this.scheduledProcedure = new ScheduledProcedure(this::update, 
scheduler);
+        }
+
+        void startRotating() {
+            scheduledProcedure.startAsync();
+        }
+
+        void stopRotating() {
+            scheduledProcedure.stopAsync();
+        }
+
+        void update() {
+            lock.writeLock().lock();
+            try {
+                // Move last item to the beginning
+                Collections.rotate(list, 1);
+                // Update it
+                updater.accept(list.get(0));
+            }
+            finally {
+                lock.writeLock().unlock();
+            }
+        }
+
+        int size() {
+            return list.size();
+        }
+
+        double sum(ToDoubleFunction<T> func) {
+            return sum(func, list.size());
+        }
+
+        double sum(ToDoubleFunction<T> func, int numItems) {
+            lock.readLock().lock();
+            try {
+                double sum = 0D;
+                for(T item : list.subList(0, numItems)) {
+                    sum += func.applyAsDouble(item);
+                }
+                return sum;
+            }
+            finally {
+                lock.readLock().unlock();
+            }
+        }
+    }
+
+    @FunctionalInterface
+    interface Procedure {
+        void apply();
+    }
+
+    static class ScheduledProcedure extends AbstractScheduledService {
+        Procedure procedure;
+        Scheduler scheduler;
+
+        ScheduledProcedure(Procedure procedure, Scheduler scheduler) {
+            this.procedure = procedure;
+            this.scheduler = scheduler;
+        }
+
+        @Override
+        protected void runOneIteration() {
+            procedure.apply();
+        }
+
+        @Override
+        protected Scheduler scheduler() {
+            return scheduler;
+        }
+    }
+}
diff --git 
a/src/test/java/org/wikimedia/search/extra/router/DegradedRouterBuilderESTest.java
 
b/src/test/java/org/wikimedia/search/extra/router/DegradedRouterBuilderESTest.java
index 6e44839..d3bcbb0 100644
--- 
a/src/test/java/org/wikimedia/search/extra/router/DegradedRouterBuilderESTest.java
+++ 
b/src/test/java/org/wikimedia/search/extra/router/DegradedRouterBuilderESTest.java
@@ -9,6 +9,7 @@
 import org.elasticsearch.common.lucene.search.MatchNoDocsQuery;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.index.query.*;
+import org.elasticsearch.monitor.os.OsProbe;
 import org.elasticsearch.monitor.os.OsService;
 import org.elasticsearch.monitor.os.OsStats;
 import org.elasticsearch.plugins.Plugin;
@@ -18,6 +19,9 @@
 import org.wikimedia.search.extra.ExtraPlugin;
 import 
org.wikimedia.search.extra.router.AbstractRouterQueryBuilder.ConditionDefinition;
 import 
org.wikimedia.search.extra.router.DegradedRouterQueryBuilder.DegradedConditionType;
+import org.wikimedia.search.extra.router.monitor.LoadService;
+import org.wikimedia.search.extra.router.monitor.LoadStats;
+import org.wikimedia.search.extra.router.monitor.SearchLatencyListener;
 
 import java.io.IOException;
 import java.util.Collection;
@@ -59,10 +63,10 @@
 
     @Override
     protected void doAssertLuceneQuery(DegradedRouterQueryBuilder builder, 
Query query, SearchContext context) throws IOException {
-        OsStats.Cpu cpu = builder.osService().stats().getCpu();
+        LoadStats stats = builder.loadService().stats();
 
         Optional<DegradedRouterQueryBuilder.DegradedCondition> cond = 
builder.conditionStream()
-                .filter(x -> x.test(cpu))
+                .filter(x -> x.test(stats))
                 .findFirst();
 
         query = rewrite(query);
@@ -107,7 +111,7 @@
 
     private DegradedRouterQueryBuilder newBuilder() {
         DegradedRouterQueryBuilder builder = new DegradedRouterQueryBuilder();
-        builder.osService(mockOsService());
+        builder.loadService(mockLoadService());
         return builder;
     }
 
@@ -125,28 +129,38 @@
     }
 
 
-    private OsService mockOsService() {
+    private LoadService mockLoadService() {
+        int p95 = randomIntBetween(100, 400);
+        int p99 = randomIntBetween(p95+1, 1000);
         double loadAvg = randomDoubleBetween(0D, 50D, false);
         double[] loadAvgs = {loadAvg, loadAvg, loadAvg};
-        OsStats stats = new OsStats(0,
-                new OsStats.Cpu((short) randomIntBetween(0, 100), loadAvgs),
-                new OsStats.Mem(0, 0),
-                new OsStats.Swap(0L, 0L),
-                new OsStats.Cgroup("", 0L, "", 0L, 0L,
-                        new OsStats.Cgroup.CpuStat(0L, 0L, 0L)));
-        return new MockOsService(stats);
+        LoadStats stats = new LoadStats(p95, p99, (short) randomIntBetween(0, 
100), loadAvgs);
+        return new MockLoadService(stats);
     }
 
-    private class MockOsService extends OsService {
-        OsStats stats;
+    private class MockLatencyProbe implements LoadService.LatencyProbe {
+        @Override
+        public double getQueryPercentile(double percentile) {
+            return 0;
+        }
 
-        MockOsService(OsStats stats) {
-            super(Settings.EMPTY);
+        @Override
+        public double getQueryPercentile(double percentile, int numBuckets) {
+            return 0;
+        }
+    }
+
+    private class MockLoadService extends LoadService {
+        LoadStats stats;
+
+        MockLoadService(LoadStats stats) {
+            // These probes won't actually be called
+            super(Settings.EMPTY, new MockLatencyProbe(), 
OsProbe.getInstance());
             this.stats = stats;
         }
 
         @Override
-        public synchronized OsStats stats() {
+        public synchronized LoadStats stats() {
             return stats;
         }
     }
diff --git 
a/src/test/java/org/wikimedia/search/extra/router/DegradedRouterQueryBuilderParserTest.java
 
b/src/test/java/org/wikimedia/search/extra/router/DegradedRouterQueryBuilderParserTest.java
index 9ed8c1d..a2e862e 100644
--- 
a/src/test/java/org/wikimedia/search/extra/router/DegradedRouterQueryBuilderParserTest.java
+++ 
b/src/test/java/org/wikimedia/search/extra/router/DegradedRouterQueryBuilderParserTest.java
@@ -39,7 +39,7 @@
         QueryBuilder builder = optional.get();
         assertThat(builder, instanceOf(DegradedRouterQueryBuilder.class));
         DegradedRouterQueryBuilder qb = (DegradedRouterQueryBuilder) builder;
-        assertNotNull(qb.osService());
+        assertNotNull(qb.loadService());
         assertEquals(1, qb.conditionStream().count());
         DegradedRouterQueryBuilder.DegradedCondition cond = 
qb.conditionStream().findFirst().get();
         assertEquals(DegradedConditionType.cpu, cond.type());
diff --git 
a/src/test/java/org/wikimedia/search/extra/router/TokenCountRouterParserTest.java
 
b/src/test/java/org/wikimedia/search/extra/router/TokenCountRouterParserTest.java
index 0823641..49865f3 100644
--- 
a/src/test/java/org/wikimedia/search/extra/router/TokenCountRouterParserTest.java
+++ 
b/src/test/java/org/wikimedia/search/extra/router/TokenCountRouterParserTest.java
@@ -6,6 +6,7 @@
 import org.elasticsearch.index.query.QueryBuilder;
 import org.elasticsearch.index.query.QueryBuilders;
 import org.junit.Test;
+
 import org.wikimedia.search.extra.QueryBuilderTestUtils;
 import org.wikimedia.search.extra.router.AbstractRouterQueryBuilder.Condition;
 

-- 
To view, visit https://gerrit.wikimedia.org/r/370986
To unsubscribe, visit https://gerrit.wikimedia.org/r/settings

Gerrit-MessageType: newchange
Gerrit-Change-Id: I70ae1b148e1f15ea77f2fec5315c47faaaa7b3dc
Gerrit-PatchSet: 1
Gerrit-Project: search/extra
Gerrit-Branch: master
Gerrit-Owner: EBernhardson <ebernhard...@wikimedia.org>

_______________________________________________
MediaWiki-commits mailing list
MediaWiki-commits@lists.wikimedia.org
https://lists.wikimedia.org/mailman/listinfo/mediawiki-commits

Reply via email to