Github user jackylk commented on a diff in the pull request:

    https://github.com/apache/carbondata/pull/2205#discussion_r183362086
  
    --- Diff: store/search/src/main/scala/org/apache/spark/rpc/Scheduler.scala 
---
    @@ -0,0 +1,116 @@
    +/*
    + * Licensed to the Apache Software Foundation (ASF) under one or more
    + * contributor license agreements.  See the NOTICE file distributed with
    + * this work for additional information regarding copyright ownership.
    + * The ASF licenses this file to You under the Apache License, Version 2.0
    + * (the "License"); you may not use this file except in compliance with
    + * the License.  You may obtain a copy of the License at
    + *
    + *    http://www.apache.org/licenses/LICENSE-2.0
    + *
    + * Unless required by applicable law or agreed to in writing, software
    + * distributed under the License is distributed on an "AS IS" BASIS,
    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    + * See the License for the specific language governing permissions and
    + * limitations under the License.
    + */
    +
    +package org.apache.spark.rpc
    +
    +import java.io.IOException
    +import java.util.concurrent.atomic.AtomicInteger
    +
    +import scala.collection.mutable
    +import scala.concurrent.Future
    +import scala.reflect.ClassTag
    +import scala.util.Random
    +
    +import org.apache.carbondata.core.util.CarbonProperties
    +
    +/**
    + * [[org.apache.spark.rpc.Master]] uses Scheduler to pick a Worker to send 
request
    + */
    +private[rpc] class Scheduler {
    +  private val workers = mutable.Map[String, Schedulable]()
    +  private val random = new Random()
    +
    +  /**
    +   * Pick a Worker according to the address and workload of the Worker
    +   * Invoke the RPC and return Future result
    +   */
    +  def sendRequestAsync[T: ClassTag](
    +      splitAddress: String,
    +      request: Any): (Schedulable, Future[T]) = {
    +    require(splitAddress != null)
    +    if (workers.isEmpty) {
    +      throw new IOException("No worker is available")
    +    }
    +    var worker = pickWorker(splitAddress)
    +
    +    // check whether worker exceed max workload, if exceeded, pick another 
worker
    +    val maxWorkload = 
CarbonProperties.getMaxWorkloadForWorker(worker.cores)
    +    var numTry = 10
    --- End diff --
    
    OK, I will change it to the number of workers (workers.size()).


---

Reply via email to