[ https://issues.apache.org/jira/browse/STORM-898?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=15058695#comment-15058695 ]

ASF GitHub Bot commented on STORM-898:
--------------------------------------

Github user rfarivar commented on a diff in the pull request:

    https://github.com/apache/storm/pull/921#discussion_r47691194
  
    --- Diff: storm-core/src/jvm/backtype/storm/scheduler/resource/RAS_Nodes.java ---
    @@ -0,0 +1,159 @@
    +/**
    + * Licensed to the Apache Software Foundation (ASF) under one
    + * or more contributor license agreements.  See the NOTICE file
    + * distributed with this work for additional information
    + * regarding copyright ownership.  The ASF licenses this file
    + * to you under the Apache License, Version 2.0 (the
    + * "License"); you may not use this file except in compliance
    + * with the License.  You may obtain a copy of the License at
    + * <p>
    + * http://www.apache.org/licenses/LICENSE-2.0
    + * <p>
    + * Unless required by applicable law or agreed to in writing, software
    + * distributed under the License is distributed on an "AS IS" BASIS,
    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    + * See the License for the specific language governing permissions and
    + * limitations under the License.
    + */
    +
    +package backtype.storm.scheduler.resource;
    +
    +import backtype.storm.Config;
    +import backtype.storm.scheduler.Cluster;
    +import backtype.storm.scheduler.ExecutorDetails;
    +import backtype.storm.scheduler.SchedulerAssignment;
    +import backtype.storm.scheduler.SupervisorDetails;
    +import backtype.storm.scheduler.Topologies;
    +import backtype.storm.scheduler.WorkerSlot;
    +import org.slf4j.Logger;
    +import org.slf4j.LoggerFactory;
    +
    +import java.util.Collection;
    +import java.util.HashMap;
    +import java.util.Map;
    +
    +public class RAS_Nodes {
    +
    +    private Map<String, RAS_Node> nodeMap;
    +
    +    private static final Logger LOG = LoggerFactory.getLogger(RAS_Nodes.class);
    +
    +    public RAS_Nodes(Cluster cluster, Topologies topologies) {
    +        this.nodeMap = getAllNodesFrom(cluster, topologies);
    +    }
    +
    +    public static Map<String, RAS_Node> getAllNodesFrom(Cluster cluster, Topologies topologies) {
    +        Map<String, RAS_Node> nodeIdToNode = new HashMap<String, RAS_Node>();
    +        for (SupervisorDetails sup : cluster.getSupervisors().values()) {
    +            //Node ID and supervisor ID are the same.
    +            String id = sup.getId();
    +            boolean isAlive = !cluster.isBlackListed(id);
    +            LOG.debug("Found a {} Node {} {}",
    +                    isAlive ? "living" : "dead", id, sup.getAllPorts());
    +            LOG.debug("resources_mem: {}, resources_CPU: {}", 
sup.getTotalMemory(), sup.getTotalCPU());
    +            nodeIdToNode.put(sup.getId(), new RAS_Node(id, 
sup.getAllPorts(), isAlive, sup, cluster, topologies));
    +        }
    +        for (Map.Entry<String, SchedulerAssignment> entry : cluster.getAssignments().entrySet()) {
    +            String topId = entry.getValue().getTopologyId();
    +            for (WorkerSlot workerSlot : entry.getValue().getSlots()) {
    +                String id = workerSlot.getNodeId();
    +                RAS_Node node = nodeIdToNode.get(id);
    +                if (node == null) {
    +                    LOG.info("Found an assigned slot on a dead supervisor 
{} with executors {}",
    +                            workerSlot, RAS_Node.getExecutors(workerSlot, 
cluster));
    +                    node = new RAS_Node(id, null, false, null, cluster, 
topologies);
    +                    nodeIdToNode.put(id, node);
    +                }
    +                if (!node.isAlive()) {
    +                    // The supervisor on this node is down, so add an orphaned slot to hold the unsupervised worker.
    +                    node.addOrphanedSlot(workerSlot);
    +                }
    +                if (node.assignInternal(workerSlot, topId, true)) {
    +                    LOG.warn("Bad scheduling state, " + workerSlot + " 
assigned multiple workers, unassigning everything...");
    +                    node.free(workerSlot);
    +                }
    +            }
    +        }
    +        updateAvailableResources(cluster, topologies, nodeIdToNode);
    +        return nodeIdToNode;
    +    }
    +
    +    /**
    +     * Updates the available resources for every node in the cluster
    +     * by recalculating memory requirements.
    +     *
    +     * @param cluster      the cluster used in this calculation
    +     * @param topologies   container of all topologies
    +     * @param nodeIdToNode a map between node id and node
    +     */
    +    private static void updateAvailableResources(Cluster cluster,
    +                                                 Topologies topologies,
    +                                                 Map<String, RAS_Node> nodeIdToNode) {
    +        //recompute memory
    +        if (cluster.getAssignments().size() > 0) {
    +            for (Map.Entry<String, SchedulerAssignment> entry : cluster.getAssignments()
    +                    .entrySet()) {
    +                Map<ExecutorDetails, WorkerSlot> executorToSlot = entry.getValue()
    +                        .getExecutorToSlot();
    +                Map<ExecutorDetails, Double> topoMemoryResourceList = topologies.getById(entry.getKey()).getTotalMemoryResourceList();
    +
    +                if (topoMemoryResourceList == null || topoMemoryResourceList.size() == 0) {
    +                    continue;
    +                }
    +                for (Map.Entry<ExecutorDetails, WorkerSlot> execToSlot : executorToSlot
    +                        .entrySet()) {
    +                    WorkerSlot slot = execToSlot.getValue();
    +                    ExecutorDetails exec = execToSlot.getKey();
    +                    RAS_Node node = nodeIdToNode.get(slot.getNodeId());
    +                    if (!node.isAlive()) {
    +                        // We do not free the assigned (orphaned) slots on inactive supervisors.
    +                        // The inactive node is treated as a 0-resource node, unavailable to other unassigned workers.
    +                        continue;
    +                    }
    +                    if (topoMemoryResourceList.containsKey(exec)) {
    +                        node.consumeResourcesforTask(exec, topologies.getById(entry.getKey()));
    +                    } else {
    +                        LOG.warn("Resource Req not found...Scheduling 
Task{} with memory requirement as on heap - {} and off heap - {} and CPU 
requirement as {}",
    --- End diff --
    
    Too long; break it onto a new line.
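
    For illustration, one way to honor this comment is to wrap the format string across lines: Java concatenates adjacent string literals at compile time, so the logged message is unchanged. A minimal, self-contained sketch (slf4j assumed on the classpath; the task and resource values are hypothetical stand-ins for the arguments elided in the diff above):

        import org.slf4j.Logger;
        import org.slf4j.LoggerFactory;

        public class LogWrapSketch {
            private static final Logger LOG = LoggerFactory.getLogger(LogWrapSketch.class);

            public static void main(String[] args) {
                // Hypothetical stand-ins for the arguments elided in the diff.
                String task = "exec[1, 1]";
                double onHeap = 128.0, offHeap = 0.0, cpu = 10.0;

                // Adjacent string literals are joined by the compiler, so the long
                // format string can be split without changing the log output.
                LOG.warn("Resource Req not found...Scheduling Task{} with memory requirement as "
                        + "on heap - {} and off heap - {} and CPU requirement as {}",
                        task, onHeap, offHeap, cpu);
            }
        }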


> Add priorities and per user resource guarantees to Resource Aware Scheduler
> ---------------------------------------------------------------------------
>
>                 Key: STORM-898
>                 URL: https://issues.apache.org/jira/browse/STORM-898
>             Project: Apache Storm
>          Issue Type: New Feature
>          Components: storm-core
>            Reporter: Robert Joseph Evans
>            Assignee: Boyang Jerry Peng
>         Attachments: Resource Aware Scheduler for Storm.pdf
>
>
> In a multi-tenant environment we would like to be able to give individual 
> users a guarantee of how much CPU/Memory/Network they will be able to use in 
> a cluster.  We would also like to know which topologies a user feels are the 
> most important to keep running if there are not enough resources to run all 
> of their topologies.
> Each user should be able to specify whether their topology is production, staging, 
> or development. Within each of those categories, a user should be able to give 
> a topology a priority from 0 to 10, with 10 being the highest priority (or 
> something like this).
> If there are not enough resources on a cluster to run a topology, assume this 
> topology is running using resources and find the user that is most over their 
> guaranteed resources. Shoot the lowest priority topology for that user, and 
> repeat until this topology is able to run or this topology would be the one 
> shot. Ideally we don't actually shoot anything until we know that we would 
> have made enough room.
> If the cluster is over-subscribed, everyone is under their guarantee, and this 
> topology would not put the user over their guarantee, shoot the lowest 
> priority topology in this worker's resource pool until there is enough room to 
> run the topology or this topology is the one that would be shot. We might 
> also want to think about what to do if we are going to shoot a production 
> topology in an oversubscribed case; perhaps we can shoot a non-production 
> topology instead even if the other user is not over their guarantee.
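
To make the eviction policy described above concrete, here is a minimal, self-contained Java sketch of the loop it proposes: find the user most over their guarantee, shoot that user's lowest-priority topology, and repeat until enough room is freed or the incoming topology would be the one shot. Every name here (Topo, planEviction, the abstract resource units) is a hypothetical illustration, not part of the Storm API:

    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.Comparator;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    public class EvictionSketch {
        // Hypothetical topology record; not a Storm class.
        static class Topo {
            final String id;
            final String owner;
            final int priority;       // 0..10, 10 = highest
            final double resources;   // abstract resource units
            Topo(String id, String owner, int priority, double resources) {
                this.id = id; this.owner = owner;
                this.priority = priority; this.resources = resources;
            }
        }

        /**
         * Plans which topologies to shoot so that `needed` resource units are
         * freed. `running` is assumed to already include `incoming` ("assume
         * this topology is running using resources"), and at least one user is
         * assumed to be over their guarantee. Returns an empty list if
         * `incoming` itself would be shot: nothing is actually freed until we
         * know enough room would exist.
         */
        static List<Topo> planEviction(List<Topo> running,
                                       Map<String, Double> overGuarantee,
                                       Topo incoming, double needed) {
            List<Topo> victims = new ArrayList<>();
            Map<String, Double> over = new HashMap<>(overGuarantee);
            List<Topo> pool = new ArrayList<>(running);
            double freed = 0.0;
            while (freed < needed) {
                // Find the user most over their guaranteed resources.
                String worstUser = Collections.max(
                        over.entrySet(), Map.Entry.comparingByValue()).getKey();
                // Shoot that user's lowest-priority topology.
                Topo victim = pool.stream()
                        .filter(t -> t.owner.equals(worstUser))
                        .min(Comparator.comparingInt(t -> t.priority))
                        .orElse(null);
                if (victim == null || victim == incoming) {
                    return Collections.emptyList(); // incoming would be the one shot
                }
                pool.remove(victim);
                victims.add(victim);
                freed += victim.resources;
                over.merge(worstUser, -victim.resources, Double::sum);
            }
            return victims;
        }
    }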



--
This message was sent by Atlassian JIRA
(v6.3.4#6332)
