Github user feynmanliang commented on a diff in the pull request:
https://github.com/apache/spark/pull/6785#discussion_r41379676
--- Diff: mllib/src/main/scala/org/apache/spark/mllib/feature/ChiSqSelector.scala ---
@@ -102,6 +110,66 @@ class ChiSqSelectorModel @Since("1.3.0") (
s"Only sparse and dense vectors are supported but got
${other.getClass}.")
}
}
+
+ override def save(sc: SparkContext, path: String): Unit = {
+ ChiSqSelectorModel.SaveLoadV1_0.save(sc, this, path)
+ }
+
+ override protected def formatVersion: String = "1.0"
+}
+
+object ChiSqSelectorModel extends Loader[ChiSqSelectorModel] {
+ override def load(sc: SparkContext, path: String): ChiSqSelectorModel = {
+ ChiSqSelectorModel.SaveLoadV1_0.load(sc, path)
+ }
+
+ private[feature]
+ object SaveLoadV1_0 {
+
+ private val thisFormatVersion = "1.0"
+
+ /** Model data for import/export */
+ case class Data(feature: Int)
+
+ private[feature]
+ val thisClassName = "org.apache.spark.mllib.feature.ChiSqSelectorModel"
+
+ def save(sc: SparkContext, model: ChiSqSelectorModel, path: String): Unit = {
+ val sqlContext = new SQLContext(sc)
+ import sqlContext.implicits._
+ val metadata = compact(render(
+ ("class" -> thisClassName) ~ ("version" -> thisFormatVersion)))
+ sc.parallelize(Seq(metadata), 1).saveAsTextFile(Loader.metadataPath(path))
+
+ // Create Parquet data.
+ val dataArray = Array.tabulate(model.selectedFeatures.length) { i =>
+ Data(model.selectedFeatures(i))
+ }
+ sc.parallelize(dataArray, 1).toDF().write.parquet(Loader.dataPath(path))
+
+ }
+
+ def load(sc: SparkContext, path: String): ChiSqSelectorModel = {
+ implicit val formats = DefaultFormats
+ val sqlContext = new SQLContext(sc)
+ val (className, formatVersion, metadata) = Loader.loadMetadata(sc, path)
+ assert(className == thisClassName)
+ assert(formatVersion == thisFormatVersion)
+
+ val dataFrame = sqlContext.read.parquet(Loader.dataPath(path))
+ val dataArray = dataFrame.select("feature")
+
+ // Check schema explicitly since erasure makes it hard to use match-case for checking.
+ Loader.checkSchema[Data](dataFrame.schema)
+
+ val features = dataArray.map {
+ case Row(feature: Int) =>
--- End diff ---
nit: 166-167 can be on one line
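
For reference, a minimal sketch of the suggested change. The diff is cut off right after the case pattern, so the body of the match (returning the matched feature and collecting the result, as the other MLlib loaders do) is an assumption on my part:

    // as written in the PR (file lines 166-167); body assumed from context
    val features = dataArray.map {
      case Row(feature: Int) =>
        feature
    }.collect()

    // suggested: fold the case pattern and its body onto one line
    val features = dataArray.map {
      case Row(feature: Int) => feature
    }.collect()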