Github user sethah commented on a diff in the pull request:

    https://github.com/apache/spark/pull/15413#discussion_r85189028
  
    --- Diff: 
mllib/src/main/scala/org/apache/spark/ml/clustering/GaussianMixture.scala ---
    @@ -316,24 +319,129 @@ class GaussianMixture @Since("2.0.0") (
       @Since("2.0.0")
       def setSeed(value: Long): this.type = set(seed, value)
     
    +  // number of samples per cluster to use when initializing Gaussians
    +  private val nSamples = 5
    +
       @Since("2.0.0")
       override def fit(dataset: Dataset[_]): GaussianMixtureModel = {
         transformSchema(dataset.schema, logging = true)
    -    val rdd: RDD[OldVector] = dataset.select(col($(featuresCol))).rdd.map {
    -      case Row(point: Vector) => OldVectors.fromML(point)
    +
    +    val sc = dataset.sparkSession.sparkContext
    +    val _k = $(k)
    +
    +    val instances: RDD[Vector] = 
dataset.select(col($(featuresCol))).rdd.map {
    +      case Row(features: Vector) => features
    +    }.cache()
    +
    +    // Extract the number of features.
    +    val numFeatures = instances.first().size
    +
    +    val shouldDistributeGaussians = 
GaussianMixture.shouldDistributeGaussians(_k, numFeatures)
    +
    +    // Determine initial weights and corresponding Gaussians.
    +    // We start with uniform weights, a random mean from the data, and
    +    // diagonal covariance matrices using component variances
    +    // derived from the samples.
    +    // TODO: Support users supplied initial GMM.
    +    val samples = instances.takeSample(withReplacement = true, _k * 
nSamples, $(seed))
    +    val weights: Array[Double] = Array.fill(_k)(1.0 / _k)
    +    /**
    +     * Since the covariance matrix of multivariate gaussian distribution 
is symmetric,
    +     * only the upper triangular part of the matrix will be stored as a 
dense vector
    +     * in order to reduce the shuffled data size.
    +     */
    +    val gaussians: Array[(DenseVector, DenseVector)] = Array.tabulate(_k) 
{ i =>
    +      val slice = samples.view(i * nSamples, (i + 1) * nSamples)
    +      val mean = {
    +        val v = new DenseVector(Array.fill[Double](numFeatures)(0.0))
    +        var i = 0
    +        while (i < nSamples) {
    +          BLAS.axpy(1.0, slice(i), v)
    +          i += 1
    +        }
    +        BLAS.scal(1.0 / nSamples, v)
    +        v
    +      }
    +      /**
    +       * Construct matrix where diagonal entries are element-wise
    +       * variance of input vectors (computes biased variance).
    +       */
    +      val cov = {
    +        val ss = new 
DenseVector(Array.fill[Double](numFeatures)(0)).asBreeze
    +        slice.foreach(xi => ss += (xi.asBreeze - mean.asBreeze) :^ 2.0)
    +        val diagVec = Vectors.fromBreeze(ss)
    +        BLAS.scal(1.0 / nSamples, diagVec)
    +        val covVec = new DenseVector(Array.fill[Double](
    +          numFeatures * (numFeatures + 1) / 2)(0.0))
    +        diagVec.toArray.zipWithIndex.foreach { case (v: Double, i: Int) =>
    +          covVec.values(i + i * (i + 1) / 2) = v
    +        }
    +        covVec
    +      }
    +      (mean, cov)
         }
     
    -    val algo = new MLlibGM()
    -      .setK($(k))
    -      .setMaxIterations($(maxIter))
    -      .setSeed($(seed))
    -      .setConvergenceTol($(tol))
    -    val parentModel = algo.run(rdd)
    -    val gaussians = parentModel.gaussians.map { case g =>
    -      new MultivariateGaussian(g.mu.asML, g.sigma.asML)
    +    var llh = Double.MinValue // current log-likelihood
    --- End diff --
    
    minor: why not `logLikelihood` and `logLikelihoodPrev`? It's nice to have 
descriptive variable names; then we can remove the comments.


---
If your project is set up for it, you can reply to this email and have your
reply appear on GitHub as well. If your project does not have this feature
enabled and wishes so, or if the feature is enabled but not working, please
contact infrastructure at [email protected] or file a JIRA ticket
with INFRA.
---

---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to