Github user sethah commented on a diff in the pull request:
https://github.com/apache/spark/pull/20632#discussion_r169703784
--- Diff: mllib/src/main/scala/org/apache/spark/ml/tree/Node.scala ---
@@ -287,6 +291,34 @@ private[tree] class LearningNode(
}
}
+ /**
+ * @return true iff the node is a leaf.
+ */
+ private def isLeafNode(): Boolean = leftChild.isEmpty && rightChild.isEmpty
+
+ // The set of (leaf) predictions appearing in the subtree rooted at the given node.
+ private lazy val leafPredictions: Set[Double] = {
--- End diff --
This will store a potentially very large collection at each node. For deep
regression trees the storage cost could be quite large. We can accomplish the
same thing without storing them:
```scala
def toNode: Node = {
// convert to an inner node only when:
// -) the node is not a leaf, and
// -) the subtree rooted at this node cannot be replaced by a single
leaf
// (i.e., at least two different leaf predictions appear in the subtree)
if (!isLeafNode) {
assert(leftChild.nonEmpty && rightChild.nonEmpty && split.nonEmpty &&
stats != null,
"Unknown error during Decision Tree learning. Could not convert
LearningNode to Node.")
(leftChild.get.toNode, rightChild.get.toNode) match {
case (l: LeafNode, r: LeafNode) if l.prediction == r.prediction =>
new LeafNode(l.prediction, stats.impurity,
stats.impurityCalculator)
case (l, r) =>
new InternalNode(stats.impurityCalculator.predict,
stats.impurity, stats.gain,
l, r, split.get, stats.impurityCalculator)
}
} else {
if (stats.valid) {
new LeafNode(stats.impurityCalculator.predict, stats.impurity,
stats.impurityCalculator)
} else {
// Here we want to keep the same behavior as the old mllib.DecisionTreeModel
new LeafNode(stats.impurityCalculator.predict, -1.0,
stats.impurityCalculator)
}
}
}
```
---
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]