Github user jkbradley commented on a diff in the pull request:

    https://github.com/apache/spark/pull/11581#discussion_r56081484
  
    --- Diff: mllib/src/main/scala/org/apache/spark/ml/tree/treeModels.scala ---
    @@ -101,3 +109,127 @@ private[ml] trait TreeEnsembleModel {
       /** Total number of nodes, summed over all trees in the ensemble. */
       lazy val totalNumNodes: Int = trees.map(_.numNodes).sum
     }
    +
    +/** Helper classes for tree model persistence */
    +private[ml] object DecisionTreeModelReadWrite {
    +
    +  /**
    +   * Info for a [[org.apache.spark.ml.tree.Split]]
    +   *
    +   * @param featureIndex  Index of feature split on
    +   * @param leftCategoriesOrThreshold  For categorical feature, set of leftCategories.
    +   *                                   For continuous feature, threshold.
    +   * @param numCategories  For categorical feature, number of categories.
    +   *                       For continuous feature, -1.
    +   */
    +  case class SplitData(
    +      featureIndex: Int,
    +      leftCategoriesOrThreshold: Array[Double],
    +      numCategories: Int) {
    +
    +    def getSplit: Split = {
    +      if (numCategories != -1) {
    +        new CategoricalSplit(featureIndex, leftCategoriesOrThreshold, 
numCategories)
    +      } else {
    +        assert(leftCategoriesOrThreshold.length == 1, s"DecisionTree split 
data expected" +
    +          s" 1 threshold for ContinuousSplit, but found thresholds: " +
    +          leftCategoriesOrThreshold.mkString(", "))
    +        new ContinuousSplit(featureIndex, leftCategoriesOrThreshold(0))
    +      }
    +    }
    +  }
    +
    +  object SplitData {
    +    def apply(split: Split): SplitData = split match {
    +      case s: CategoricalSplit =>
    +        SplitData(s.featureIndex, s.leftCategories, s.numCategories)
    +      case s: ContinuousSplit =>
    +        SplitData(s.featureIndex, Array(s.threshold), -1)
    +    }
    +  }
    +
    +  /**
    +   * Info for a [[Node]]
    +   *
    +   * @param id  Index used for tree reconstruction.  Indices follow a pre-order traversal.
    +   * @param impurityStats  Stats array.  Impurity type is stored in 
metadata.
    +   * @param gain  Gain, or arbitrary value if leaf node.
    +   * @param leftChild  Left child index, or arbitrary value if leaf node.
    +   * @param rightChild  Right child index, or arbitrary value if leaf node.
    +   * @param split  Split info, or arbitrary value if leaf node.
    +   */
    +  case class NodeData(
    +    id: Int,
    +    prediction: Double,
    +    impurity: Double,
    +    impurityStats: Array[Double],
    +    gain: Double,
    +    leftChild: Int,
    +    rightChild: Int,
    +    split: SplitData)
    +
    +  object NodeData {
    +    /**
    +     * Create [[NodeData]] instances for this node and all children.
    +     *
    +     * @param id  Current ID.  IDs are assigned via a pre-order traversal.
    +     * @return (sequence of nodes in pre-order traversal order, largest ID in subtree)
    +     *         The nodes are returned in pre-order traversal (root first) so that it is
    +     *         easy to get the ID of the subtree's root node.
    --- End diff --
    
    Whoops!


---
If your project is set up for it, you can reply to this email and have your
reply appear on GitHub as well. If your project does not have this feature
enabled and wishes so, or if the feature is enabled but not working, please
contact infrastructure at [email protected] or file a JIRA ticket
with INFRA.
---

---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to