[GitHub] drill pull request #639: DRILL-4706: Fragment planning causes Drillbits to r...

2016-11-04 Thread sohami
Github user sohami commented on a diff in the pull request:

https://github.com/apache/drill/pull/639#discussion_r86646115
  
--- Diff: 
exec/java-exec/src/test/java/org/apache/drill/exec/planner/fragment/TestLocalAffinityFragmentParallelizer.java
 ---
@@ -0,0 +1,476 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.planner.fragment;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableMap;
+import mockit.Mocked;
+import mockit.NonStrictExpectations;
+import org.apache.drill.exec.physical.EndpointAffinity;
+import org.apache.drill.exec.physical.base.PhysicalOperator;
+import org.junit.Test;
+
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Collections;
+
+import static java.lang.Integer.MAX_VALUE;
+import static org.apache.drill.exec.ExecConstants.SLICE_TARGET_DEFAULT;
+import static org.apache.drill.exec.planner.fragment.LocalAffinityFragmentParallelizer.INSTANCE;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpoint;
+
+
+public class TestLocalAffinityFragmentParallelizer {
+
+// Create a set of test endpoints
+private static final DrillbitEndpoint DEP1 = newDrillbitEndpoint("node1", 30010);
+private static final DrillbitEndpoint DEP2 = newDrillbitEndpoint("node2", 30010);
+private static final DrillbitEndpoint DEP3 = newDrillbitEndpoint("node3", 30010);
+private static final DrillbitEndpoint DEP4 = newDrillbitEndpoint("node4", 30010);
+private static final DrillbitEndpoint DEP5 = newDrillbitEndpoint("node5", 30010);
+
+@Mocked private Fragment fragment;
+@Mocked private PhysicalOperator root;
+
+private static final DrillbitEndpoint newDrillbitEndpoint(String address, int port) {
+return DrillbitEndpoint.newBuilder().setAddress(address).setControlPort(port).build();
+}
+
+private static final ParallelizationParameters newParameters(final long threshold, final int maxWidthPerNode,
+ final int maxGlobalWidth) {
+return new ParallelizationParameters() {
+@Override
+public long getSliceTarget() {
+return threshold;
+}
+
+@Override
+public int getMaxWidthPerNode() {
+return maxWidthPerNode;
+}
+
+@Override
+public int getMaxGlobalWidth() {
+return maxGlobalWidth;
+}
+
+/**
+ * {@link LocalAffinityFragmentParallelizer} doesn't use 
affinity factor.
+ * @return
+ */
+@Override
+public double getAffinityFactor() {
+return 0.0f;
+}
+};
+}
+
+private final Wrapper newWrapper(double cost, int minWidth, int maxWidth, List<EndpointAffinity> endpointAffinities) {
+new NonStrictExpectations() {
+{
+fragment.getRoot(); result = root;
+}
+};
+
+final Wrapper fragmentWrapper = new Wrapper(fragment, 1);
+final Stats stats = fragmentWrapper.getStats();
+stats.setDistributionAffinity(DistributionAffinity.LOCAL);
+stats.addCost(cost);
+stats.addMinWidth(minWidth);
+stats.addMaxWidth(maxWidth);
+stats.addEndpointAffinities(endpointAffinities);
+return fragmentWrapper;
+}
+
+private void checkEndpointAssignments(List<DrillbitEndpoint> assignedEndpoints,
+  Map<DrillbitEndpoint, Integer> expectedAssignments) throws Exception {
+Map<DrillbitEndpoint, Integer> endpointAssignments = new 

[GitHub] drill pull request #639: DRILL-4706: Fragment planning causes Drillbits to r...

2016-11-04 Thread sohami
Github user sohami commented on a diff in the pull request:

https://github.com/apache/drill/pull/639#discussion_r86605131
  
--- Diff: 
exec/java-exec/src/main/java/org/apache/drill/exec/planner/fragment/LocalAffinityFragmentParallelizer.java
 ---
@@ -0,0 +1,165 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.planner.fragment;
+
+import com.google.common.collect.Lists;
+import com.google.common.collect.Ordering;
+import org.apache.drill.exec.physical.EndpointAffinity;
+import org.apache.drill.exec.physical.PhysicalOperatorSetupException;
+import org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpoint;
+
+import java.util.Map;
+import java.util.List;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.Collections;
+
+/**
+ * Implementation of {@link FragmentParallelizer} where fragment has zero 
or more endpoints.
+ * This is for Parquet Scan Fragments only. Fragment placement is done 
preferring
+ * data locality.
+ */
+public class LocalAffinityFragmentParallelizer implements 
FragmentParallelizer {
+public static final LocalAffinityFragmentParallelizer INSTANCE = new 
LocalAffinityFragmentParallelizer();
+
+// Sort a list of map entries by values.
+Ordering<Map.Entry<DrillbitEndpoint, Integer>> sortByValues = new Ordering<Map.Entry<DrillbitEndpoint, Integer>>() {
+@Override
+public int compare(Map.Entry<DrillbitEndpoint, Integer> left, Map.Entry<DrillbitEndpoint, Integer> right) {
+return right.getValue().compareTo(left.getValue());
+}
+};
+
+@Override
+public void parallelizeFragment(final Wrapper fragmentWrapper, final ParallelizationParameters parameters,
+final Collection<DrillbitEndpoint> activeEndpoints) throws PhysicalOperatorSetupException {
+final Stats stats = fragmentWrapper.getStats();
+final ParallelizationInfo parallelizationInfo = 
stats.getParallelizationInfo();
+final Map<DrillbitEndpoint, EndpointAffinity> endpointAffinityMap =
+fragmentWrapper.getStats().getParallelizationInfo().getEndpointAffinityMap();
+int totalWorkUnits = 0;
+Map<DrillbitEndpoint, Integer> endpointPool = new HashMap<>();
+
+// Get the total number of work units and list of endPoints to 
schedule fragments on
+for (Map.Entry<DrillbitEndpoint, EndpointAffinity> epAff : endpointAffinityMap.entrySet()) {
+if (epAff.getValue().getNumLocalWorkUnits() > 0) {
+totalWorkUnits += epAff.getValue().getNumLocalWorkUnits();
+endpointPool.put(epAff.getKey(), 
epAff.getValue().getNumLocalWorkUnits());
+}
+}
+
+// Find the parallelization width of fragment
+// 1. Find the parallelization based on cost. Use max cost of all 
operators in this fragment; this is consistent
+//with the calculation that ExcessiveExchangeRemover uses.
+int width = (int) Math.ceil(stats.getMaxCost() / 
parameters.getSliceTarget());
+
+// 2. Cap the parallelization width by fragment level width limit 
and system level per query width limit
+width = Math.min(width, 
Math.min(parallelizationInfo.getMaxWidth(), parameters.getMaxGlobalWidth()));
+
+// 3. Cap the parallelization width by system level per node width 
limit
+width = Math.min(width, parameters.getMaxWidthPerNode() * 
endpointPool.size());
+
+// 4. Make sure width is at least the min width enforced by 
operators
+width = Math.max(parallelizationInfo.getMinWidth(), width);
+
+// 5. Make sure width is at most the max width enforced by 
operators
+width = Math.min(parallelizationInfo.getMaxWidth(), width);
+
+// 6: Finally make sure the width is at least one
+width = Math.max(1, width);
+
+List 

[GitHub] drill pull request #639: DRILL-4706: Fragment planning causes Drillbits to r...

2016-11-04 Thread ppadma
Github user ppadma commented on a diff in the pull request:

https://github.com/apache/drill/pull/639#discussion_r86597707
  
--- Diff: 
exec/java-exec/src/main/java/org/apache/drill/exec/planner/fragment/LocalAffinityFragmentParallelizer.java
 ---
@@ -0,0 +1,149 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.planner.fragment;
+
+import com.google.common.collect.Lists;
+import org.apache.drill.exec.physical.EndpointAffinity;
+import org.apache.drill.exec.physical.PhysicalOperatorSetupException;
+import org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpoint;
+
+import java.util.Map;
+import java.util.List;
+import java.util.Collection;
+import java.util.HashMap;
+
+
+/**
+ * Implementation of {@link FragmentParallelizer} where fragment has zero 
or more endpoints.
+ * This is for Parquet Scan Fragments only. Fragment placement is done 
preferring strict
+ * data locality.
+ */
+public class LocalAffinityFragmentParallelizer implements 
FragmentParallelizer {
+public static final LocalAffinityFragmentParallelizer INSTANCE = new 
LocalAffinityFragmentParallelizer();
+
+@Override
+public void parallelizeFragment(final Wrapper fragmentWrapper, final ParallelizationParameters parameters,
+final Collection<DrillbitEndpoint> activeEndpoints) throws PhysicalOperatorSetupException {
+
+// Find the parallelization width of fragment
+final Stats stats = fragmentWrapper.getStats();
+final ParallelizationInfo parallelizationInfo = 
stats.getParallelizationInfo();
+
+// 1. Find the parallelization based on cost. Use max cost of all 
operators in this fragment; this is consistent
+//with the calculation that ExcessiveExchangeRemover uses.
+int width = (int) Math.ceil(stats.getMaxCost() / 
parameters.getSliceTarget());
+
+// 2. Cap the parallelization width by fragment level width limit 
and system level per query width limit
+width = Math.min(width, 
Math.min(parallelizationInfo.getMaxWidth(), parameters.getMaxGlobalWidth()));
+
+// 3. Cap the parallelization width by system level per node width 
limit
+width = Math.min(width, parameters.getMaxWidthPerNode() * 
activeEndpoints.size());
+
+// 4. Make sure width is at least the min width enforced by 
operators
+width = Math.max(parallelizationInfo.getMinWidth(), width);
+
+// 5. Make sure width is at most the max width enforced by 
operators
+width = Math.min(parallelizationInfo.getMaxWidth(), width);
+
+// 6: Finally make sure the width is at least one
+width = Math.max(1, width);
+
+List<DrillbitEndpoint> endpointPool = Lists.newArrayList();
+List<DrillbitEndpoint> assignedEndPoints = Lists.newArrayList();
+
+Map<DrillbitEndpoint, EndpointAffinity> endpointAffinityMap =
+fragmentWrapper.getStats().getParallelizationInfo().getEndpointAffinityMap();
+
+int totalAssigned = 0;
+int totalWorkUnits = 0;
+
+// Get the total number of work units and list of endPoints to 
schedule fragments on
+for (Map.Entry<DrillbitEndpoint, EndpointAffinity> epAff : endpointAffinityMap.entrySet()) {
+if (epAff.getValue().getNumLocalWorkUnits() > 0) {
+totalWorkUnits += epAff.getValue().getNumLocalWorkUnits();
+endpointPool.add(epAff.getKey());
+}
+}
+
+// Keep track of number of fragments allocated to each endpoint.
+Map<DrillbitEndpoint, Integer> endpointAssignments = new HashMap<>();
+
+// Keep track of how many more to assign to each endpoint.
+Map<DrillbitEndpoint, Integer> remainingEndpointAssignments = new HashMap<>();
+
+// Calculate the target allocation for each endPoint based on work 
it has to do
+// Assign one fragment (minimum) to all the endPoints 

[GitHub] drill pull request #639: DRILL-4706: Fragment planning causes Drillbits to r...

2016-11-01 Thread sudheeshkatkam
Github user sudheeshkatkam commented on a diff in the pull request:

https://github.com/apache/drill/pull/639#discussion_r86073435
  
--- Diff: 
contrib/storage-kudu/src/main/java/org/apache/drill/exec/store/kudu/KuduGroupScan.java
 ---
@@ -145,6 +145,11 @@ public EndpointByteMap getByteMap() {
 public int compareTo(CompleteWork o) {
   return 0;
 }
+
+@Override
+public DrillbitEndpoint getPreferredEndpoint() {
--- End diff --

Can you add a TODO here?
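
For context, a minimal sketch of what the requested change could look like, assuming the Kudu scan has no locality information yet (the null return and the TODO wording are assumptions of this sketch, not the code in the PR):

    @Override
    public DrillbitEndpoint getPreferredEndpoint() {
      // TODO: DRILL-4706 - derive a real preferred endpoint from Kudu tablet
      // locality; returning null means "no local preference" for now.
      return null;
    }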




[GitHub] drill pull request #639: DRILL-4706: Fragment planning causes Drillbits to r...

2016-11-01 Thread sudheeshkatkam
Github user sudheeshkatkam commented on a diff in the pull request:

https://github.com/apache/drill/pull/639#discussion_r86074128
  
--- Diff: 
exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/ParquetGroupScan.java
 ---
@@ -822,10 +838,103 @@ private void getFiles(String path, List<FileStatus> fileStatuses) throws IOExcep
 }
   }
 
+  /*
+   * Figure out the best node to scan each of the rowGroups and update the 
preferredEndpoint.
+   * Based on this, update the total work units assigned to the endpoint 
in the endpointAffinity.
+   */
+  private void computeRowGroupAssignment() {
+Map<DrillbitEndpoint, Integer> numEndpointAssignments = Maps.newHashMap();
+Map<DrillbitEndpoint, Long> numAssignedBytes = Maps.newHashMap();
+
+// Do this for 2 iterations to adjust node assignments after first 
iteration.
+int numIterartions = 2;
--- End diff --

Iterartions -> iterations




[GitHub] drill pull request #639: DRILL-4706: Fragment planning causes Drillbits to r...

2016-11-01 Thread sudheeshkatkam
Github user sudheeshkatkam commented on a diff in the pull request:

https://github.com/apache/drill/pull/639#discussion_r86074701
  
--- Diff: 
exec/java-exec/src/main/java/org/apache/drill/exec/planner/fragment/LocalAffinityFragmentParallelizer.java
 ---
@@ -0,0 +1,149 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.planner.fragment;
+
+import com.google.common.collect.Lists;
+import org.apache.drill.exec.physical.EndpointAffinity;
+import org.apache.drill.exec.physical.PhysicalOperatorSetupException;
+import org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpoint;
+
+import java.util.Map;
+import java.util.List;
+import java.util.Collection;
+import java.util.HashMap;
+
+
+/**
+ * Implementation of {@link FragmentParallelizer} where fragment has zero 
or more endpoints.
+ * This is for Parquet Scan Fragments only. Fragment placement is done 
preferring strict
+ * data locality.
+ */
+public class LocalAffinityFragmentParallelizer implements 
FragmentParallelizer {
--- End diff --

When to use this vs HardAffinityFragmentParallelizer?




[GitHub] drill pull request #639: DRILL-4706: Fragment planning causes Drillbits to r...

2016-11-01 Thread sudheeshkatkam
Github user sudheeshkatkam commented on a diff in the pull request:

https://github.com/apache/drill/pull/639#discussion_r86073626
  
--- Diff: 
exec/java-exec/src/main/java/org/apache/drill/exec/planner/fragment/LocalAffinityFragmentParallelizer.java
 ---
@@ -0,0 +1,149 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.planner.fragment;
+
+import com.google.common.collect.Lists;
+import org.apache.drill.exec.physical.EndpointAffinity;
+import org.apache.drill.exec.physical.PhysicalOperatorSetupException;
+import org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpoint;
+
+import java.util.Map;
+import java.util.List;
+import java.util.Collection;
+import java.util.HashMap;
+
+
+/**
+ * Implementation of {@link FragmentParallelizer} where fragment has zero 
or more endpoints.
+ * This is for Parquet Scan Fragments only. Fragment placement is done 
preferring strict
+ * data locality.
+ */
+public class LocalAffinityFragmentParallelizer implements 
FragmentParallelizer {
+public static final LocalAffinityFragmentParallelizer INSTANCE = new 
LocalAffinityFragmentParallelizer();
+
+@Override
+public void parallelizeFragment(final Wrapper fragmentWrapper, final ParallelizationParameters parameters,
+final Collection<DrillbitEndpoint> activeEndpoints) throws PhysicalOperatorSetupException {
+
+// Find the parallelization width of fragment
+final Stats stats = fragmentWrapper.getStats();
+final ParallelizationInfo parallelizationInfo = 
stats.getParallelizationInfo();
+
+// 1. Find the parallelization based on cost. Use max cost of all 
operators in this fragment; this is consistent
+//with the calculation that ExcessiveExchangeRemover uses.
+int width = (int) Math.ceil(stats.getMaxCost() / 
parameters.getSliceTarget());
+
+// 2. Cap the parallelization width by fragment level width limit 
and system level per query width limit
+width = Math.min(width, 
Math.min(parallelizationInfo.getMaxWidth(), parameters.getMaxGlobalWidth()));
+
+// 3. Cap the parallelization width by system level per node width 
limit
+width = Math.min(width, parameters.getMaxWidthPerNode() * 
activeEndpoints.size());
+
+// 4. Make sure width is at least the min width enforced by 
operators
+width = Math.max(parallelizationInfo.getMinWidth(), width);
+
+// 5. Make sure width is at most the max width enforced by 
operators
+width = Math.min(parallelizationInfo.getMaxWidth(), width);
+
+// 6: Finally make sure the width is at least one
+width = Math.max(1, width);
+
+List<DrillbitEndpoint> endpointPool = Lists.newArrayList();
--- End diff --

final (and wherever possible, generously)
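
For example, applied to the lines above, a sketch of the suggestion (not the PR code):

    final List<DrillbitEndpoint> endpointPool = Lists.newArrayList();
    final List<DrillbitEndpoint> assignedEndPoints = Lists.newArrayList();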




[GitHub] drill pull request #639: DRILL-4706: Fragment planning causes Drillbits to r...

2016-11-01 Thread sohami
Github user sohami commented on a diff in the pull request:

https://github.com/apache/drill/pull/639#discussion_r86057320
  
--- Diff: 
exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/ParquetGroupScan.java
 ---
@@ -822,10 +838,103 @@ private void getFiles(String path, List<FileStatus> fileStatuses) throws IOExcep
 }
   }
 
+  /*
+   * Figure out the best node to scan each of the rowGroups and update the 
preferredEndpoint.
+   * Based on this, update the total work units assigned to the endpoint 
in the endpointAffinity.
+   */
+  private void computeRowGroupAssignment() {
+Map<DrillbitEndpoint, Integer> numEndpointAssignments = Maps.newHashMap();
+Map<DrillbitEndpoint, Long> numAssignedBytes = Maps.newHashMap();
+
+// Do this for 2 iterations to adjust node assignments after first 
iteration.
+int numIterartions = 2;
+
+while (numIterartions-- > 0) {
+
+  for (RowGroupInfo rowGroupInfo : rowGroupInfos) {
+EndpointByteMap endpointByteMap = rowGroupInfo.getByteMap();
+
+// This can be empty for local file system or if drilbit is not 
running
+// on hosts which have data.
+if (endpointByteMap.isEmpty()) {
+  continue;
+}
+
+// Get the list of endpoints which have maximum (equal) data.
+List<DrillbitEndpoint> topEndpoints = endpointByteMap.getTopEndpoints();
--- End diff --

It took me a while to understand the algorithm below just by reading the code. 
It would be helpful to name the variables better here and to add comments 
explaining the different sections. For example, renaming as below might help:
1) "topEndPoints" to "maxRGDataEndPoints"
2) "minBytes" to "assignedBytesOnPickedNode"
3) "numBytes" to "assignedBytesOnCurrEndpoint"
4) "endpoint" to "currEndpoint"

As per my understanding, lines 864 to 892 form one section with the following 
logic: for each row group, assign a drillbit from the topEndPoints list such 
that the chosen one is the least loaded in terms of work units.
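
To make the renaming concrete, a minimal sketch of the selection step using the proposed names (illustrative only, not the PR code; the pickedEndpoint variable and the Long.MAX_VALUE seed are assumptions of this sketch):

    // Endpoints holding the maximum (equal) amount of this row group's data.
    List<DrillbitEndpoint> maxRGDataEndPoints = endpointByteMap.getTopEndpoints();

    DrillbitEndpoint pickedEndpoint = null;
    long assignedBytesOnPickedNode = Long.MAX_VALUE;
    for (DrillbitEndpoint currEndpoint : maxRGDataEndPoints) {
      long assignedBytesOnCurrEndpoint =
          numAssignedBytes.containsKey(currEndpoint) ? numAssignedBytes.get(currEndpoint) : 0L;
      // Prefer the candidate that currently has the least data assigned to it.
      if (assignedBytesOnCurrEndpoint < assignedBytesOnPickedNode) {
        pickedEndpoint = currEndpoint;
        assignedBytesOnPickedNode = assignedBytesOnCurrEndpoint;
      }
    }
    rowGroupInfo.preferredEndpoint = pickedEndpoint;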




[GitHub] drill pull request #639: DRILL-4706: Fragment planning causes Drillbits to r...

2016-11-01 Thread sohami
Github user sohami commented on a diff in the pull request:

https://github.com/apache/drill/pull/639#discussion_r86057298
  
--- Diff: 
exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/ParquetGroupScan.java
 ---
@@ -530,6 +534,7 @@ public RowGroupInfo(@JsonProperty("path") String path, 
@JsonProperty("start") lo
   this.rowGroupIndex = rowGroupIndex;
   this.rowCount = rowCount;
   this.numRecordsToRead = rowCount;
+  this.preferredEndpoint = null;
--- End diff --

Not required.




[GitHub] drill pull request #639: DRILL-4706: Fragment planning causes Drillbits to r...

2016-11-01 Thread sohami
Github user sohami commented on a diff in the pull request:

https://github.com/apache/drill/pull/639#discussion_r86062215
  
--- Diff: 
exec/java-exec/src/main/java/org/apache/drill/exec/store/schedule/AssignmentCreator.java
 ---
@@ -146,6 +153,32 @@ private AssignmentCreator(List 
incomingEndpoints, List unit
 return currentUnassignedList;
   }
 
+
+  private LinkedList<WorkEndpointListPair> assignLocal(List<WorkEndpointListPair> workList,
+  Map<DrillbitEndpoint, FragIteratorWrapper> endpointIterators) {
+LinkedList<WorkEndpointListPair> currentUnassignedList = Lists.newLinkedList();
+for (WorkEndpointListPair workPair : workList) {
+  DrillbitEndpoint endpoint = workPair.work.getPreferredEndpoint();
+  if (endpoint == null) {
+currentUnassignedList.add(workPair);
+continue;
+  }
+
+  FragIteratorWrapper iteratorWrapper = 
endpointIterators.get(endpoint);
+  if (iteratorWrapper == null) {
+currentUnassignedList.add(workPair);
+continue;
+  }
+
+  Integer assignment = iteratorWrapper.iter.next();
+  iteratorWrapper.count++;
--- End diff --

Shouldn't we check here whether "iteratorWrapper.count" exceeds "iteratorWrapper.maxCount"?

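A small sketch of the guard being asked about (illustrative only; whether an endpoint that has reached maxCount should push the work unit back to the regular assignment pass is an assumption of this sketch):

    final FragIteratorWrapper iteratorWrapper = endpointIterators.get(endpoint);
    if (iteratorWrapper == null || iteratorWrapper.count >= iteratorWrapper.maxCount) {
      // No fragment runs on the preferred endpoint, or the endpoint already has
      // its full share of assignments: defer to the regular assignment pass.
      currentUnassignedList.add(workPair);
      continue;
    }
    final Integer assignment = iteratorWrapper.iter.next();
    iteratorWrapper.count++;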



[GitHub] drill pull request #639: DRILL-4706: Fragment planning causes Drillbits to r...

2016-11-01 Thread sohami
Github user sohami commented on a diff in the pull request:

https://github.com/apache/drill/pull/639#discussion_r86057156
  
--- Diff: 
exec/java-exec/src/main/java/org/apache/drill/exec/physical/EndpointAffinity.java
 ---
@@ -75,6 +78,7 @@ public EndpointAffinity(final DrillbitEndpoint endpoint, 
final double affinity,
 this.affinity = affinity;
 this.mandatory = mandatory;
 this.maxWidth = maxWidth;
+this.numLocalWorkUnits = 0;
--- End diff --

Not needed. By default it will always be initialized to 0
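
For reference, a minimal illustration of why the assignment is redundant (a sketch, not the actual class; the int type here is an assumption):

    public class EndpointAffinity {
      // Numeric instance fields default to 0 in Java, so this field already
      // starts at 0 and an explicit "this.numLocalWorkUnits = 0;" is a no-op.
      private int numLocalWorkUnits;
    }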




[GitHub] drill pull request #639: DRILL-4706: Fragment planning causes Drillbits to r...

2016-11-01 Thread sohami
Github user sohami commented on a diff in the pull request:

https://github.com/apache/drill/pull/639#discussion_r86060309
  
--- Diff: 
exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/ParquetGroupScan.java
 ---
@@ -822,10 +838,103 @@ private void getFiles(String path, List<FileStatus> fileStatuses) throws IOExcep
 }
   }
 
+  /*
+   * Figure out the best node to scan each of the rowGroups and update the 
preferredEndpoint.
+   * Based on this, update the total work units assigned to the endpoint 
in the endpointAffinity.
+   */
+  private void computeRowGroupAssignment() {
+Map<DrillbitEndpoint, Integer> numEndpointAssignments = Maps.newHashMap();
+Map<DrillbitEndpoint, Long> numAssignedBytes = Maps.newHashMap();
+
+// Do this for 2 iterations to adjust node assignments after first 
iteration.
+int numIterartions = 2;
+
+while (numIterartions-- > 0) {
+
+  for (RowGroupInfo rowGroupInfo : rowGroupInfos) {
+EndpointByteMap endpointByteMap = rowGroupInfo.getByteMap();
+
+// This can be empty for local file system or if drilbit is not 
running
+// on hosts which have data.
+if (endpointByteMap.isEmpty()) {
+  continue;
+}
+
+// Get the list of endpoints which have maximum (equal) data.
+List<DrillbitEndpoint> topEndpoints = endpointByteMap.getTopEndpoints();
+
+long minBytes = 0, numBytes = 0;
+DrillbitEndpoint nodePicked = rowGroupInfo.preferredEndpoint;
+if (nodePicked != null && 
numAssignedBytes.containsKey(nodePicked)) {
+  minBytes = numAssignedBytes.get(nodePicked);
+}
+
+DrillbitEndpoint previousNodePicked = nodePicked;
+
+for (DrillbitEndpoint endpoint : topEndpoints) {
+  if (nodePicked == null) {
+nodePicked = endpoint;
+if (numAssignedBytes.containsKey(nodePicked)) {
+  minBytes = numAssignedBytes.get(nodePicked);
+}
+  }
+
+  if (numAssignedBytes.containsKey(endpoint)) {
+numBytes = numAssignedBytes.get(endpoint);
+  } else {
+numBytes = 0;
+  }
+
+  if (numBytes < minBytes) {
+nodePicked = endpoint;
+minBytes = numBytes;
+  }
+}
+
+if (nodePicked != null && nodePicked != previousNodePicked) {
+  numAssignedBytes.put(nodePicked, minBytes + 
endpointByteMap.get(nodePicked));
+  if (numEndpointAssignments.containsKey(nodePicked)) {
+numEndpointAssignments.put(nodePicked, 
numEndpointAssignments.get(nodePicked) + 1);
+  } else {
+numEndpointAssignments.put(nodePicked, 1);
+  }
+
+  // If a different node is picked in second iteration, update.
+  if (previousNodePicked != null) {
+numAssignedBytes.put(previousNodePicked,
+numAssignedBytes.get(previousNodePicked) - 
endpointByteMap.get(previousNodePicked));
+numEndpointAssignments.put(previousNodePicked, 
numEndpointAssignments.get(previousNodePicked) - 1);
+  }
+}
+rowGroupInfo.preferredEndpoint = nodePicked;
+  }
+}
+
+// Set the number of local work units for each endpoint in the 
endpointAffinity.
+for (EndpointAffinity epAff : endpointAffinities) {
+  DrillbitEndpoint endpoint = epAff.getEndpoint();
+  if (numEndpointAssignments.containsKey(endpoint)) {
+epAff.setNumLocalWorkUnits(numEndpointAssignments.get(endpoint));
+  } else {
+epAff.setNumLocalWorkUnits(0);
--- End diff --

"else" condition is not required since by default it will be set to 0




[GitHub] drill pull request #639: DRILL-4706: Fragment planning causes Drillbits to r...

2016-11-01 Thread sohami
Github user sohami commented on a diff in the pull request:

https://github.com/apache/drill/pull/639#discussion_r86060362
  
--- Diff: 
exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/ParquetGroupScan.java
 ---
@@ -822,10 +838,103 @@ private void getFiles(String path, List<FileStatus> fileStatuses) throws IOExcep
 }
   }
 
+  /*
+   * Figure out the best node to scan each of the rowGroups and update the 
preferredEndpoint.
+   * Based on this, update the total work units assigned to the endpoint 
in the endpointAffinity.
+   */
+  private void computeRowGroupAssignment() {
+Map<DrillbitEndpoint, Integer> numEndpointAssignments = Maps.newHashMap();
+Map<DrillbitEndpoint, Long> numAssignedBytes = Maps.newHashMap();
+
+// Do this for 2 iterations to adjust node assignments after first 
iteration.
+int numIterartions = 2;
+
+while (numIterartions-- > 0) {
+
+  for (RowGroupInfo rowGroupInfo : rowGroupInfos) {
+EndpointByteMap endpointByteMap = rowGroupInfo.getByteMap();
+
+// This can be empty for local file system or if drilbit is not 
running
+// on hosts which have data.
+if (endpointByteMap.isEmpty()) {
+  continue;
+}
+
+// Get the list of endpoints which have maximum (equal) data.
+List<DrillbitEndpoint> topEndpoints = endpointByteMap.getTopEndpoints();
+
+long minBytes = 0, numBytes = 0;
+DrillbitEndpoint nodePicked = rowGroupInfo.preferredEndpoint;
+if (nodePicked != null && 
numAssignedBytes.containsKey(nodePicked)) {
+  minBytes = numAssignedBytes.get(nodePicked);
+}
+
+DrillbitEndpoint previousNodePicked = nodePicked;
+
+for (DrillbitEndpoint endpoint : topEndpoints) {
+  if (nodePicked == null) {
+nodePicked = endpoint;
+if (numAssignedBytes.containsKey(nodePicked)) {
+  minBytes = numAssignedBytes.get(nodePicked);
+}
+  }
+
+  if (numAssignedBytes.containsKey(endpoint)) {
+numBytes = numAssignedBytes.get(endpoint);
+  } else {
+numBytes = 0;
+  }
+
+  if (numBytes < minBytes) {
+nodePicked = endpoint;
+minBytes = numBytes;
+  }
+}
+
+if (nodePicked != null && nodePicked != previousNodePicked) {
+  numAssignedBytes.put(nodePicked, minBytes + 
endpointByteMap.get(nodePicked));
+  if (numEndpointAssignments.containsKey(nodePicked)) {
+numEndpointAssignments.put(nodePicked, 
numEndpointAssignments.get(nodePicked) + 1);
+  } else {
+numEndpointAssignments.put(nodePicked, 1);
+  }
+
+  // If a different node is picked in second iteration, update.
+  if (previousNodePicked != null) {
+numAssignedBytes.put(previousNodePicked,
+numAssignedBytes.get(previousNodePicked) - 
endpointByteMap.get(previousNodePicked));
+numEndpointAssignments.put(previousNodePicked, 
numEndpointAssignments.get(previousNodePicked) - 1);
+  }
+}
+rowGroupInfo.preferredEndpoint = nodePicked;
+  }
+}
+
+// Set the number of local work units for each endpoint in the 
endpointAffinity.
+for (EndpointAffinity epAff : endpointAffinities) {
+  DrillbitEndpoint endpoint = epAff.getEndpoint();
+  if (numEndpointAssignments.containsKey(endpoint)) {
+epAff.setNumLocalWorkUnits(numEndpointAssignments.get(endpoint));
+  } else {
+epAff.setNumLocalWorkUnits(0);
+  }
+}
+
--- End diff --

Please remove extra space. Please review other places as well.

