This is an automated email from the ASF dual-hosted git repository.
dongjoon pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/spark.git
The following commit(s) were added to refs/heads/master by this push:
new aa41dce [SPARK-28159][ML][FOLLOWUP] fix typo & (0 until v.size).toList => List.range(0, v.size)
aa41dce is described below
commit aa41dcea4a41899507dfe4ec1eceaabb5edf728f
Author: zhengruifeng <[email protected]>
AuthorDate: Fri Jul 12 11:00:16 2019 -0700
[SPARK-28159][ML][FOLLOWUP] fix typo & (0 until v.size).toList => List.range(0, v.size)
## What changes were proposed in this pull request?
Fix a typo introduced in SPARK-28159: `transfromWithMean` -> `transformWithMean`.
Also replace `(0 until v.size).toList` with `List.range(0, v.size)`.
## How was this patch tested?
Existing tests.
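As a hedged illustration (not part of the patch): the two index-building expressions are equivalent, but `List.range` builds the list directly instead of materializing a `Range` and then converting it.

    // Standalone Scala sketch; `n` stands in for `v.size`.
    val n = 4
    val viaRange = (0 until n).toList   // Range 0, 1, 2, 3 converted to a List
    val direct   = List.range(0, n)     // List(0, 1, 2, 3) built directly
    assert(viaRange == direct)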
Closes #25129 from zhengruifeng/to_ml_vec_cleanup.
Authored-by: zhengruifeng <[email protected]>
Signed-off-by: Dongjoon Hyun <[email protected]>
---
mllib/src/main/scala/org/apache/spark/ml/clustering/LDA.scala | 2 +-
mllib/src/main/scala/org/apache/spark/ml/feature/StandardScaler.scala | 2 +-
.../main/scala/org/apache/spark/mllib/feature/StandardScaler.scala | 4 ++--
3 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/mllib/src/main/scala/org/apache/spark/ml/clustering/LDA.scala b/mllib/src/main/scala/org/apache/spark/ml/clustering/LDA.scala
index aa81037..681bb95 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/clustering/LDA.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/clustering/LDA.scala
@@ -490,7 +490,7 @@ abstract class LDAModel private[ml] (
Vectors.zeros(k)
} else {
val (ids: List[Int], cts: Array[Double]) = vector match {
- case v: DenseVector => ((0 until v.size).toList, v.values)
+ case v: DenseVector => (List.range(0, v.size), v.values)
case v: SparseVector => (v.indices.toList, v.values)
case other =>
throw new UnsupportedOperationException(
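For context, a minimal standalone sketch (not part of the patch) of what the two matched branches above yield: a dense vector contributes every index via `List.range(0, v.size)`, while a sparse vector contributes only its active indices.

    import org.apache.spark.ml.linalg.{DenseVector, SparseVector}

    // Illustrative vectors; in both cases the index list lines up with the values array.
    val dense  = new DenseVector(Array(0.1, 0.0, 0.3))
    val sparse = new SparseVector(3, Array(0, 2), Array(0.1, 0.3))

    val denseIds:  List[Int] = List.range(0, dense.size)   // List(0, 1, 2)
    val sparseIds: List[Int] = sparse.indices.toList       // List(0, 2)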
diff --git a/mllib/src/main/scala/org/apache/spark/ml/feature/StandardScaler.scala b/mllib/src/main/scala/org/apache/spark/ml/feature/StandardScaler.scala
index 17f2c17..81cf2e1 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/feature/StandardScaler.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/feature/StandardScaler.scala
@@ -169,7 +169,7 @@ class StandardScalerModel private[ml] (
case d: DenseVector => d.values.clone()
case v: Vector => v.toArray
}
- val newValues = scaler.transfromWithMean(values)
+ val newValues = scaler.transformWithMean(values)
Vectors.dense(newValues)
} else if ($(withStd)) {
vector: Vector =>
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/feature/StandardScaler.scala b/mllib/src/main/scala/org/apache/spark/mllib/feature/StandardScaler.scala
index 578b779..19e53e7 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/feature/StandardScaler.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/feature/StandardScaler.scala
@@ -141,7 +141,7 @@ class StandardScalerModel @Since("1.3.0") (
case d: DenseVector => d.values.clone()
case v: Vector => v.toArray
}
- val newValues = transfromWithMean(values)
+ val newValues = transformWithMean(values)
Vectors.dense(newValues)
} else if (withStd) {
vector match {
@@ -161,7 +161,7 @@ class StandardScalerModel @Since("1.3.0") (
}
}
- private[spark] def transfromWithMean(values: Array[Double]): Array[Double] = {
+ private[spark] def transformWithMean(values: Array[Double]): Array[Double] = {
// By default, Scala generates Java methods for member variables. So every time
// the member variables are accessed, `invokespecial` will be called, which is expensive.
// This can be avoided by having a local reference of `shift`.
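The comment above describes a common JVM micro-optimization; here is a hedged sketch of the pattern with illustrative names (not the actual `StandardScalerModel` code): the field is copied into a local `val` once, so the hot loop reads a local instead of going through the generated accessor on every iteration.

    // Illustrative only: `Shifter` and `localShift` are made-up names.
    class Shifter(val shift: Array[Double]) {
      def transformWithMean(values: Array[Double]): Array[Double] = {
        val localShift = shift          // single accessor call
        var i = 0
        while (i < values.length) {
          values(i) -= localShift(i)    // plain local + array reads inside the loop
          i += 1
        }
        values
      }
    }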