[ https://issues.apache.org/jira/browse/FLINK-2853?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=14964471#comment-14964471 ]

ASF GitHub Bot commented on FLINK-2853:
---------------------------------------

Github user gallenvara commented on a diff in the pull request:

    https://github.com/apache/flink/pull/1267#discussion_r42453815
  
    --- Diff: flink-benchmark/src/test/java/org/apache/flink/benchmark/runtime/operates/hash/MutableHashTablePerformanceBenchmark.java ---
    @@ -0,0 +1,361 @@
    +/**
    + * Licensed to the Apache Software Foundation (ASF) under one
    + * or more contributor license agreements.  See the NOTICE file
    + * distributed with this work for additional information
    + * regarding copyright ownership.  The ASF licenses this file
    + * to you under the Apache License, Version 2.0 (the
    + * "License"); you may not use this file except in compliance
    + * with the License.  You may obtain a copy of the License at
    + * <p/>
    + * http://www.apache.org/licenses/LICENSE-2.0
    + * <p/>
    + * Unless required by applicable law or agreed to in writing, software
    + * distributed under the License is distributed on an "AS IS" BASIS,
    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    + * See the License for the specific language governing permissions and
    + * limitations under the License.
    + */
    +
    +package org.apache.flink.benchmark.runtime.operates.hash;
    +
    +import java.io.IOException;
    +import java.util.List;
    +import java.util.concurrent.TimeUnit;
    +
    +import org.apache.flink.api.common.typeutils.TypeComparator;
    +import org.apache.flink.api.common.typeutils.TypePairComparator;
    +import org.apache.flink.api.common.typeutils.TypeSerializer;
    +import org.apache.flink.core.memory.MemorySegment;
    +import org.apache.flink.runtime.io.disk.iomanager.IOManager;
    +import org.apache.flink.runtime.io.disk.iomanager.IOManagerAsync;
    +import org.apache.flink.runtime.jobgraph.tasks.AbstractInvokable;
    +import org.apache.flink.runtime.memory.MemoryAllocationException;
    +import org.apache.flink.runtime.memory.MemoryManager;
    +import org.apache.flink.runtime.operators.hash.MutableHashTable;
    +import org.apache.flink.runtime.operators.testutils.DummyInvokable;
    +import org.apache.flink.runtime.operators.testutils.types.StringPair;
    +import org.apache.flink.runtime.operators.testutils.types.StringPairComparator;
    +import org.apache.flink.runtime.operators.testutils.types.StringPairPairComparator;
    +import org.apache.flink.runtime.operators.testutils.types.StringPairSerializer;
    +import org.apache.flink.util.MutableObjectIterator;
    +
    +import org.junit.Assert;
    +import org.openjdk.jmh.annotations.*;
    +import org.openjdk.jmh.runner.Runner;
    +import org.openjdk.jmh.runner.options.Options;
    +import org.openjdk.jmh.runner.options.OptionsBuilder;
    +
    +import static org.junit.Assert.fail;
    +
    +@State(Scope.Thread)
    +@BenchmarkMode(Mode.AverageTime)
    +@OutputTimeUnit(TimeUnit.MILLISECONDS)
    +public class MutableHashTablePerformanceBenchmark {
    +
    +   private static final AbstractInvokable MEM_OWNER = new DummyInvokable();
    +
    +   private MemoryManager memManager;
    +   private IOManager ioManager;
    +
    +   private TypeSerializer<StringPair> pairBuildSideAccesssor;
    +   private TypeSerializer<StringPair> pairProbeSideAccesssor;
    +   private TypeComparator<StringPair> pairBuildSideComparator;
    +   private TypeComparator<StringPair> pairProbeSideComparator;
    +   private TypePairComparator<StringPair, StringPair> pairComparator;
    +
    +   private static final String COMMENT = "this comments should contains a 96 byte data, 100 plus another integer value and seperator char.";
    +
    +
    +   @Setup
    +   public void setup() {
    +           this.pairBuildSideAccesssor = new StringPairSerializer();
    +           this.pairProbeSideAccesssor = new StringPairSerializer();
    +           this.pairBuildSideComparator = new StringPairComparator();
    +           this.pairProbeSideComparator = new StringPairComparator();
    +           this.pairComparator = new StringPairPairComparator();
    +
    +           this.memManager = new MemoryManager(64 * 1024 * 1024, 1);
    +           this.ioManager = new IOManagerAsync();
    +   }
    +
    +   @TearDown
    +   public void tearDown() {
    +           // shut down I/O manager and Memory Manager and verify the correct shutdown
    +           this.ioManager.shutdown();
    +           if (!this.ioManager.isProperlyShutDown()) {
    +                   fail("I/O manager was not properly shut down.");
    +           }
    +           if (!this.memManager.verifyEmpty()) {
    +                   fail("Not all memory was properly released to the memory manager --> Memory Leak.");
    +           }
    +   }
    +
    +   @Benchmark
    +   public void compareMutableHashTableWithBloomFilter1() throws IOException {
    +           // ----------------------------------------------90% filtered during probe spill phase-----------------------------------------
    +           // create a build input with 1000000 records with key spread between [0 -- 10000000] with step of 10 for nearby records.
    +           int buildSize = 1000000;
    +           int buildStep = 10;
    +           int buildScope = buildStep * buildSize;
    +           // create a probe input with 5000000 records with key spread between [0 -- 1000000] with distance of 1 for nearby records.
    +           int probeSize = 5000000;
    +           int probeStep = 1;
    +           int probeScope = buildSize;
    +
    +           int expectedResult = 500000;
    +
    +           this.hybridHashJoin(buildSize, buildStep, buildScope, probeSize, probeStep, probeScope, expectedResult, true);
    +
    +           System.out.println("HybridHashJoin2:");
    --- End diff --
    
    Yes, that's right. I have submitted a new PR to correct these problems.


> Apply JMH on MutableHashTablePerformanceBenchmark class.
> --------------------------------------------------------
>
>                 Key: FLINK-2853
>                 URL: https://issues.apache.org/jira/browse/FLINK-2853
>             Project: Flink
>          Issue Type: Sub-task
>          Components: Tests
>            Reporter: GaoLun
>            Assignee: GaoLun
>            Priority: Minor
>              Labels: easyfix
>
> JMH is a Java harness for building, running, and analysing
> nano/micro/milli/macro benchmarks. Use JMH to replace the old
> micro-benchmark approach in order to get much more accurate results.
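
For context, a JMH benchmark class such as the one in the diff above is usually launched through the JMH Runner. The following is a minimal sketch of such a launcher; the wrapper class name, warmup/measurement iteration counts, and fork count are illustrative assumptions and are not taken from the pull request.

    import org.openjdk.jmh.runner.Runner;
    import org.openjdk.jmh.runner.RunnerException;
    import org.openjdk.jmh.runner.options.Options;
    import org.openjdk.jmh.runner.options.OptionsBuilder;

    // Hypothetical command-line entry point for the benchmark; not part of the PR.
    public class MutableHashTableBenchmarkRunner {
        public static void main(String[] args) throws RunnerException {
            Options options = new OptionsBuilder()
                    // select benchmark methods by a regex on the class name
                    .include("MutableHashTablePerformanceBenchmark")
                    // illustrative values: 3 warmup and 5 measurement iterations, one fork
                    .warmupIterations(3)
                    .measurementIterations(5)
                    .forks(1)
                    .build();
            new Runner(options).run();
        }
    }

With the @BenchmarkMode(Mode.AverageTime) and @OutputTimeUnit(TimeUnit.MILLISECONDS) annotations used in the diff, JMH reports the average time per invocation of each @Benchmark method in milliseconds.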



--
This message was sent by Atlassian JIRA
(v6.3.4#6332)
