gianm closed pull request #5963: [Backport] topN: Fix caching of Float dimension values
URL: https://github.com/apache/incubator-druid/pull/5963
 
 
   

This is a PR merged from a forked repository. Because GitHub hides the original diff once a forked pull request is merged, it is reproduced below for the sake of provenance:

diff --git a/processing/src/main/java/io/druid/query/topn/TopNQueryQueryToolChest.java b/processing/src/main/java/io/druid/query/topn/TopNQueryQueryToolChest.java
index 81761afc6a3..774543d3239 100644
--- a/processing/src/main/java/io/druid/query/topn/TopNQueryQueryToolChest.java
+++ b/processing/src/main/java/io/druid/query/topn/TopNQueryQueryToolChest.java
@@ -384,6 +384,11 @@ public Object apply(final Result<TopNResultValue> input)
             Iterator<Object> inputIter = results.iterator();
             DateTime timestamp = granularity.toDateTime(((Number) inputIter.next()).longValue());
 
+            // Need a value transformer to convert generic Jackson-deserialized type into the proper type.
+            final Function<Object, Object> dimValueTransformer = TopNMapFn.getValueTransformer(
+                query.getDimensionSpec().getOutputType()
+            );
+
             while (inputIter.hasNext()) {
               List<Object> result = (List<Object>) inputIter.next();
               Map<String, Object> vals = Maps.newLinkedHashMap();
@@ -391,7 +396,7 @@ public Object apply(final Result<TopNResultValue> input)
               Iterator<AggregatorFactory> aggIter = aggs.iterator();
               Iterator<Object> resultIter = result.iterator();
 
-              vals.put(query.getDimensionSpec().getOutputName(), resultIter.next());
+              vals.put(query.getDimensionSpec().getOutputName(), dimValueTransformer.apply(resultIter.next()));
 
               while (aggIter.hasNext() && resultIter.hasNext()) {
                 final AggregatorFactory factory = aggIter.next();
diff --git a/processing/src/test/java/io/druid/query/topn/TopNQueryQueryToolChestTest.java b/processing/src/test/java/io/druid/query/topn/TopNQueryQueryToolChestTest.java
index 8938390986a..8b85430345a 100644
--- a/processing/src/test/java/io/druid/query/topn/TopNQueryQueryToolChestTest.java
+++ b/processing/src/test/java/io/druid/query/topn/TopNQueryQueryToolChestTest.java
@@ -47,9 +47,11 @@
 import io.druid.segment.TestHelper;
 import io.druid.segment.TestIndex;
 import io.druid.segment.VirtualColumns;
+import io.druid.segment.column.ValueType;
 import org.junit.Assert;
 import org.junit.Test;
 
+import java.io.IOException;
 import java.util.Arrays;
 import java.util.Map;
 
@@ -61,49 +63,15 @@
   @Test
   public void testCacheStrategy() throws Exception
   {
-    CacheStrategy<Result<TopNResultValue>, Object, TopNQuery> strategy =
-        new TopNQueryQueryToolChest(null, null).getCacheStrategy(
-            new TopNQuery(
-                new TableDataSource("dummy"),
-                VirtualColumns.EMPTY,
-                new DefaultDimensionSpec("test", "test"),
-                new NumericTopNMetricSpec("metric1"),
-                3,
-                new MultipleIntervalSegmentSpec(ImmutableList.of(Intervals.of("2015-01-01/2015-01-02"))),
-                null,
-                Granularities.ALL,
-                ImmutableList.<AggregatorFactory>of(new CountAggregatorFactory("metric1")),
-                ImmutableList.<PostAggregator>of(new ConstantPostAggregator("post", 10)),
-                null
-            )
-        );
-
-    final Result<TopNResultValue> result = new Result<>(
-        // test timestamps that result in integer size millis
-        DateTimes.utc(123L),
-        new TopNResultValue(
-            Arrays.asList(
-                ImmutableMap.<String, Object>of(
-                    "test", "val1",
-                    "metric1", 2
-                )
-            )
-        )
-    );
-
-    Object preparedValue = strategy.prepareForCache().apply(
-        result
-    );
-
-    ObjectMapper objectMapper = TestHelper.makeJsonMapper();
-    Object fromCacheValue = objectMapper.readValue(
-        objectMapper.writeValueAsBytes(preparedValue),
-        strategy.getCacheObjectClazz()
-    );
-
-    Result<TopNResultValue> fromCacheResult = strategy.pullFromCache().apply(fromCacheValue);
+    doTestCacheStrategy(ValueType.STRING, "val1");
+    doTestCacheStrategy(ValueType.FLOAT, 2.1f);
+    doTestCacheStrategy(ValueType.DOUBLE, 2.1d);
+    doTestCacheStrategy(ValueType.LONG, 2L);
+  }
 
-    Assert.assertEquals(result, fromCacheResult);
+  @Test
+  public void testCacheStrategyWithFloatDimension() throws Exception
+  {
   }
 
   @Test
@@ -215,6 +183,78 @@ public void testMinTopNThreshold() throws Exception
     Assert.assertEquals(2000, mockRunner.query.getThreshold());
   }
 
+  private void doTestCacheStrategy(final ValueType valueType, final Object dimValue) throws IOException
+  {
+    CacheStrategy<Result<TopNResultValue>, Object, TopNQuery> strategy =
+        new TopNQueryQueryToolChest(null, null).getCacheStrategy(
+            new TopNQuery(
+                new TableDataSource("dummy"),
+                VirtualColumns.EMPTY,
+                new DefaultDimensionSpec("test", "test", valueType),
+                new NumericTopNMetricSpec("metric1"),
+                3,
+                new MultipleIntervalSegmentSpec(ImmutableList.of(Intervals.of("2015-01-01/2015-01-02"))),
+                null,
+                Granularities.ALL,
+                ImmutableList.<AggregatorFactory>of(new CountAggregatorFactory("metric1")),
+                ImmutableList.<PostAggregator>of(new ConstantPostAggregator("post", 10)),
+                null
+            )
+        );
+
+    final Result<TopNResultValue> result1 = new Result<>(
+        // test timestamps that result in integer size millis
+        DateTimes.utc(123L),
+        new TopNResultValue(
+            Arrays.asList(
+                ImmutableMap.<String, Object>of(
+                    "test", dimValue,
+                    "metric1", 2
+                )
+            )
+        )
+    );
+
+    Object preparedValue = strategy.prepareForCache().apply(
+        result1
+    );
+
+    ObjectMapper objectMapper = TestHelper.makeJsonMapper();
+    Object fromCacheValue = objectMapper.readValue(
+        objectMapper.writeValueAsBytes(preparedValue),
+        strategy.getCacheObjectClazz()
+    );
+
+    Result<TopNResultValue> fromCacheResult = strategy.pullFromCache().apply(fromCacheValue);
+
+    Assert.assertEquals(result1, fromCacheResult);
+
+    final Result<TopNResultValue> result2 = new Result<>(
+        // test timestamps that result in integer size millis
+        DateTimes.utc(123L),
+        new TopNResultValue(
+            Arrays.asList(
+                ImmutableMap.<String, Object>of(
+                    "test", dimValue,
+                    "metric1", 2
+                )
+            )
+        )
+    );
+
+    Object preparedResultCacheValue = strategy.prepareForCache().apply(
+        result2
+    );
+
+    Object fromResultCacheValue = objectMapper.readValue(
+        objectMapper.writeValueAsBytes(preparedResultCacheValue),
+        strategy.getCacheObjectClazz()
+    );
+
+    Result<TopNResultValue> fromResultCacheResult = strategy.pullFromCache().apply(fromResultCacheValue);
+    Assert.assertEquals(result2, fromResultCacheResult);
+  }
+
   static class MockQueryRunner implements QueryRunner<Result<TopNResultValue>>
   {
     private final QueryRunner<Result<TopNResultValue>> runner;


 

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services

---------------------------------------------------------------------
To unsubscribe, e-mail: dev-unsubscr...@druid.apache.org
For additional commands, e-mail: dev-h...@druid.apache.org
