zentol commented on a change in pull request #8446: [FLINK-12414] [runtime] 
Implement ExecutionGraph to SchedulingTopology
URL: https://github.com/apache/flink/pull/8446#discussion_r284689769
 
 

 ##########
 File path: 
flink-runtime/src/test/java/org/apache/flink/runtime/scheduler/adapter/DefaultExecutionVertexTest.java
 ##########
 @@ -18,131 +18,112 @@
 
 package org.apache.flink.runtime.scheduler.adapter;
 
-import org.apache.flink.api.common.JobID;
 import org.apache.flink.runtime.execution.ExecutionState;
-import org.apache.flink.runtime.executiongraph.ExecutionEdge;
-import org.apache.flink.runtime.executiongraph.ExecutionGraph;
-import org.apache.flink.runtime.executiongraph.ExecutionVertex;
-import org.apache.flink.runtime.executiongraph.IntermediateResultPartition;
-import org.apache.flink.runtime.executiongraph.TestRestartStrategy;
-import 
org.apache.flink.runtime.executiongraph.utils.SimpleAckingTaskManagerGateway;
+import org.apache.flink.runtime.jobgraph.IntermediateDataSetID;
 import org.apache.flink.runtime.jobgraph.IntermediateResultPartitionID;
-import org.apache.flink.runtime.jobgraph.JobVertex;
+import org.apache.flink.runtime.jobgraph.JobVertexID;
+import org.apache.flink.runtime.scheduler.strategy.ExecutionVertexID;
 import org.apache.flink.runtime.scheduler.strategy.SchedulingExecutionVertex;
 import org.apache.flink.runtime.scheduler.strategy.SchedulingResultPartition;
+import org.apache.flink.util.TestLogger;
 
 import org.junit.Before;
 import org.junit.Test;
 
+import javax.xml.ws.Provider;
+
 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.Collection;
+import java.util.Collections;
 import java.util.List;
-import java.util.Random;
-import java.util.Set;
 import java.util.stream.Collectors;
 
 import static org.apache.flink.api.common.InputDependencyConstraint.ALL;
-import static org.apache.flink.api.common.InputDependencyConstraint.ANY;
-import static 
org.apache.flink.runtime.executiongraph.ExecutionGraphTestUtils.createNoOpVertex;
-import static 
org.apache.flink.runtime.executiongraph.ExecutionGraphTestUtils.createSimpleTestGraph;
-import static 
org.apache.flink.runtime.executiongraph.ExecutionGraphTestUtils.setVertexState;
 import static 
org.apache.flink.runtime.io.network.partition.ResultPartitionType.BLOCKING;
-import static org.apache.flink.runtime.jobgraph.DistributionPattern.ALL_TO_ALL;
 import static org.hamcrest.Matchers.containsInAnyOrder;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertThat;
 
 /**
  * Unit tests for {@link DefaultExecutionVertex}.
  */
-public class DefaultExecutionVertexTest {
-
-       private final SimpleAckingTaskManagerGateway taskManagerGateway = new 
SimpleAckingTaskManagerGateway();
-
-       private final TestRestartStrategy triggeredRestartStrategy = 
TestRestartStrategy.manuallyTriggered();
+public class DefaultExecutionVertexTest extends TestLogger {
 
-       private final int parallelism = 3;
+       private final ExecutionStateProviderTest stateProvider = new 
ExecutionStateProviderTest();
 
        private List<SchedulingExecutionVertex> schedulingExecutionVertices;
 
-       private List<ExecutionVertex> executionVertices;
+       private IntermediateResultPartitionID intermediateResultPartitionId;
 
        @Before
        public void setUp() throws Exception {
-               JobVertex[] jobVertices = new JobVertex[2];
-               jobVertices[0] = createNoOpVertex(parallelism);
-               jobVertices[1] = createNoOpVertex(parallelism);
-               jobVertices[1].connectNewDataSetAsInput(jobVertices[0], 
ALL_TO_ALL, BLOCKING);
-               jobVertices[0].setInputDependencyConstraint(ALL);
-               jobVertices[1].setInputDependencyConstraint(ANY);
-               ExecutionGraph executionGraph = createSimpleTestGraph(
-                       new JobID(),
-                       taskManagerGateway,
-                       triggeredRestartStrategy,
-                       jobVertices);
-               ExecutionGraphToSchedulingTopologyAdapter adapter = new 
ExecutionGraphToSchedulingTopologyAdapter(executionGraph);
-
-               schedulingExecutionVertices = new ArrayList<>();
-               adapter.getVertices().forEach(vertex -> 
schedulingExecutionVertices.add(vertex));
-               executionVertices = new ArrayList<>();
-               executionGraph.getAllExecutionVertices().forEach(vertex -> 
executionVertices.add(vertex));
-       }
 
-       @Test
-       public void testGetId() {
-               for (int idx = 0; idx < schedulingExecutionVertices.size(); 
idx++){
-                       
assertEquals(schedulingExecutionVertices.get(idx).getId().getJobVertexId(),
-                               executionVertices.get(idx).getJobvertexId());
-                       
assertEquals(schedulingExecutionVertices.get(idx).getId().getSubtaskIndex(),
-                               
executionVertices.get(idx).getParallelSubtaskIndex());
-               }
+               schedulingExecutionVertices = new ArrayList<>(2);
 
 Review comment:
   With the suggestion below applied, we never iterate over the array; we could just have 2 fields (`producer` and `consumer`), which is a bit easier to read.

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
[email protected]


With regards,
Apache Git Services

Reply via email to