Repository: spark
Updated Branches:
  refs/heads/master b5ce84a1b -> 7c970f909


Minor corrections, i.e. typo fixes and following deprecation advice

Author: Jacek Laskowski <[email protected]>

Closes #10432 from jaceklaskowski/minor-corrections.


Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/7c970f90
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/7c970f90
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/7c970f90

Branch: refs/heads/master
Commit: 7c970f9093bda0a789d7d6e43c72a6d317fc3723
Parents: b5ce84a
Author: Jacek Laskowski <[email protected]>
Authored: Tue Dec 22 10:47:10 2015 -0800
Committer: Reynold Xin <[email protected]>
Committed: Tue Dec 22 10:47:10 2015 -0800

----------------------------------------------------------------------
 core/src/main/scala/org/apache/spark/SparkContext.scala          | 2 +-
 .../org/apache/spark/executor/CoarseGrainedExecutorBackend.scala | 2 +-
 .../scala/org/apache/spark/rpc/netty/RpcEndpointAddress.scala    | 2 +-
 .../main/scala/org/apache/spark/scheduler/TaskSetManager.scala   | 4 ++--
 .../spark/scheduler/cluster/CoarseGrainedSchedulerBackend.scala  | 2 +-
 5 files changed, 6 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/spark/blob/7c970f90/core/src/main/scala/org/apache/spark/SparkContext.scala
----------------------------------------------------------------------
diff --git a/core/src/main/scala/org/apache/spark/SparkContext.scala b/core/src/main/scala/org/apache/spark/SparkContext.scala
index c4541aa..67230f4 100644
--- a/core/src/main/scala/org/apache/spark/SparkContext.scala
+++ b/core/src/main/scala/org/apache/spark/SparkContext.scala
@@ -2095,7 +2095,7 @@ class SparkContext(config: SparkConf) extends Logging with ExecutorAllocationCli
 
   /** Default min number of partitions for Hadoop RDDs when not given by user */
   @deprecated("use defaultMinPartitions", "1.0.0")
-  def defaultMinSplits: Int = math.min(defaultParallelism, 2)
+  def defaultMinSplits: Int = defaultMinPartitions
 
   /**
    * Default min number of partitions for Hadoop RDDs when not given by user
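
This hunk follows the deprecation notice: the old name now simply forwards to its replacement, so the two can never diverge. A minimal standalone sketch of the forwarding idiom (the `Demo` object is hypothetical; only the pattern is taken from the change above):

    object Demo {
      // Stand-in for the cluster's default parallelism.
      def defaultParallelism: Int = Runtime.getRuntime.availableProcessors()

      // Current API: min partitions for Hadoop RDDs when not set by the user.
      def defaultMinPartitions: Int = math.min(defaultParallelism, 2)

      // The deprecated alias forwards to the replacement instead of
      // duplicating its body, so both always return the same value.
      @deprecated("use defaultMinPartitions", "1.0.0")
      def defaultMinSplits: Int = defaultMinPartitions
    }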

http://git-wip-us.apache.org/repos/asf/spark/blob/7c970f90/core/src/main/scala/org/apache/spark/executor/CoarseGrainedExecutorBackend.scala
----------------------------------------------------------------------
diff --git a/core/src/main/scala/org/apache/spark/executor/CoarseGrainedExecutorBackend.scala b/core/src/main/scala/org/apache/spark/executor/CoarseGrainedExecutorBackend.scala
index c2ebf30..77c88ba 100644
--- a/core/src/main/scala/org/apache/spark/executor/CoarseGrainedExecutorBackend.scala
+++ b/core/src/main/scala/org/apache/spark/executor/CoarseGrainedExecutorBackend.scala
@@ -257,7 +257,7 @@ private[spark] object CoarseGrainedExecutorBackend extends Logging {
     // scalastyle:off println
     System.err.println(
       """
-      |"Usage: CoarseGrainedExecutorBackend [options]
+      |Usage: CoarseGrainedExecutorBackend [options]
       |
       | Options are:
       |   --driver-url <driverUrl>
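
The removed `"` was a stray character inside the multi-line usage string; everything after each leading `|` is printed verbatim once the margin is stripped. A minimal sketch of the idiom, assuming the string is trimmed with `stripMargin` as the `|` markers suggest:

    // Each `|` marks the left margin; stripMargin drops everything up to
    // and including it, so a stray quote after `|` would appear in output.
    val usage =
      """
        |Usage: CoarseGrainedExecutorBackend [options]
        |
        | Options are:
        |   --driver-url <driverUrl>
      """.stripMargin
    System.err.println(usage)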

http://git-wip-us.apache.org/repos/asf/spark/blob/7c970f90/core/src/main/scala/org/apache/spark/rpc/netty/RpcEndpointAddress.scala
----------------------------------------------------------------------
diff --git a/core/src/main/scala/org/apache/spark/rpc/netty/RpcEndpointAddress.scala b/core/src/main/scala/org/apache/spark/rpc/netty/RpcEndpointAddress.scala
index d2e94f9..cd6f00c 100644
--- a/core/src/main/scala/org/apache/spark/rpc/netty/RpcEndpointAddress.scala
+++ b/core/src/main/scala/org/apache/spark/rpc/netty/RpcEndpointAddress.scala
@@ -26,7 +26,7 @@ import org.apache.spark.rpc.RpcAddress
  * The `rpcAddress` may be null, in which case the endpoint is registered via a client-only
  * connection and can only be reached via the client that sent the endpoint reference.
  *
- * @param rpcAddress The socket address of the endpint.
+ * @param rpcAddress The socket address of the endpoint.
  * @param name Name of the endpoint.
  */
 private[netty] case class RpcEndpointAddress(val rpcAddress: RpcAddress, val name: String) {

http://git-wip-us.apache.org/repos/asf/spark/blob/7c970f90/core/src/main/scala/org/apache/spark/scheduler/TaskSetManager.scala
----------------------------------------------------------------------
diff --git a/core/src/main/scala/org/apache/spark/scheduler/TaskSetManager.scala b/core/src/main/scala/org/apache/spark/scheduler/TaskSetManager.scala
index a02f301..380301f 100644
--- a/core/src/main/scala/org/apache/spark/scheduler/TaskSetManager.scala
+++ b/core/src/main/scala/org/apache/spark/scheduler/TaskSetManager.scala
@@ -608,7 +608,7 @@ private[spark] class TaskSetManager(
   }
 
   /**
-   * Marks the task as successful and notifies the DAGScheduler that a task has ended.
+   * Marks a task as successful and notifies the DAGScheduler that the task has ended.
    */
   def handleSuccessfulTask(tid: Long, result: DirectTaskResult[_]): Unit = {
     val info = taskInfos(tid)
@@ -705,7 +705,7 @@ private[spark] class TaskSetManager(
         ef.exception
 
       case e: ExecutorLostFailure if !e.exitCausedByApp =>
-        logInfo(s"Task $tid failed because while it was being computed, its 
executor" +
+        logInfo(s"Task $tid failed because while it was being computed, its 
executor " +
           "exited for a reason unrelated to the task. Not counting this 
failure towards the " +
           "maximum number of failures for the task.")
         None
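
The second hunk fixes a missing trailing space: Scala's `+` joins string fragments with no separator, so every fragment except the last must end with a space. A tiny self-contained illustration (the task id is invented):

    val tid = 42L  // hypothetical task id, for illustration only
    // Without the trailing space the words run together in the log line.
    val bad  = s"Task $tid failed because while it was being computed, its executor" +
      "exited for a reason unrelated to the task."
    val good = s"Task $tid failed because while it was being computed, its executor " +
      "exited for a reason unrelated to the task."
    assert(bad.contains("executorexited"))
    assert(good.contains("executor exited"))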

http://git-wip-us.apache.org/repos/asf/spark/blob/7c970f90/core/src/main/scala/org/apache/spark/scheduler/cluster/CoarseGrainedSchedulerBackend.scala
----------------------------------------------------------------------
diff --git a/core/src/main/scala/org/apache/spark/scheduler/cluster/CoarseGrainedSchedulerBackend.scala b/core/src/main/scala/org/apache/spark/scheduler/cluster/CoarseGrainedSchedulerBackend.scala
index 2279e8c..f222007 100644
--- a/core/src/main/scala/org/apache/spark/scheduler/cluster/CoarseGrainedSchedulerBackend.scala
+++ b/core/src/main/scala/org/apache/spark/scheduler/cluster/CoarseGrainedSchedulerBackend.scala
@@ -30,7 +30,7 @@ import org.apache.spark.scheduler.cluster.CoarseGrainedSchedulerBackend.ENDPOINT
 import org.apache.spark.util.{ThreadUtils, SerializableBuffer, AkkaUtils, Utils}
 
 /**
- * A scheduler backend that waits for coarse grained executors to connect to it through Akka.
+ * A scheduler backend that waits for coarse-grained executors to connect.
  * This backend holds onto each executor for the duration of the Spark job rather than relinquishing
  * executors whenever a task is done and asking the scheduler to launch a new executor for
  * each new task. Executors may be launched in a variety of ways, such as Mesos tasks for the

