This is an automated email from the ASF dual-hosted git repository.

shimamoto pushed a commit to branch develop
in repository https://gitbox.apache.org/repos/asf/predictionio.git


The following commit(s) were added to refs/heads/develop by this push:
     new f65245c  Mark override methods for future safety maintenance (#513)
f65245c is described below

commit f65245c660a6a5802d56b9f2af4f708b5ab7c88f
Author: Naoki Takezoe <[email protected]>
AuthorDate: Thu May 16 10:15:11 2019 +0900

    Mark override methods for future safety maintenance (#513)
---
 .../controller/IdentityPreparator.scala            |  2 +-
 .../predictionio/controller/LAlgorithm.scala       |  6 +++---
 .../predictionio/controller/LAverageServing.scala  |  2 +-
 .../predictionio/controller/LDataSource.scala      |  4 ++--
 .../predictionio/controller/LFirstServing.scala    |  2 +-
 .../predictionio/controller/LPreparator.scala      |  2 +-
 .../apache/predictionio/controller/LServing.scala  |  4 ++--
 .../LocalFileSystemPersistentModel.scala           |  4 ++--
 .../apache/predictionio/controller/Metric.scala    | 22 +++++++++++-----------
 .../predictionio/controller/MetricEvaluator.scala  |  2 +-
 .../predictionio/controller/P2LAlgorithm.scala     |  6 +++---
 .../predictionio/controller/PAlgorithm.scala       |  6 +++---
 .../predictionio/controller/PDataSource.scala      |  4 ++--
 .../predictionio/controller/PPreparator.scala      |  2 +-
 .../predictionio/workflow/CleanupFunctions.scala   |  4 +---
 .../workflow/EngineServerPluginsActor.scala        |  2 +-
 .../predictionio/workflow/FakeWorkflow.scala       |  6 +++---
 17 files changed, 39 insertions(+), 41 deletions(-)
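
For context on the commit title: marking each of these implemented methods with
Scala's `override` modifier means a future rename or signature change in the
corresponding base trait becomes a compile-time error instead of a silently
orphaned method. A minimal sketch of the failure mode this guards against
(hypothetical trait and class names, not code from this commit):

    object OverrideSketch {
      trait Servable[Q, P] {
        def serveBase(q: Q, ps: Seq[P]): P = ps.head  // concrete default
      }

      class AverageServing extends Servable[Int, Double] {
        // With `override`, renaming serveBase in Servable (or changing its
        // signature) makes this definition fail to compile ("method overrides
        // nothing"); without it, this would silently become an unrelated
        // method while callers fell back to the trait default.
        override def serveBase(q: Int, ps: Seq[Double]): Double =
          ps.sum / ps.length
      }

      def main(args: Array[String]): Unit =
        println(new AverageServing().serveBase(1, Seq(1.0, 2.0, 3.0)))  // 2.0
    }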

diff --git a/core/src/main/scala/org/apache/predictionio/controller/IdentityPreparator.scala b/core/src/main/scala/org/apache/predictionio/controller/IdentityPreparator.scala
index 8256142..b7a7b8f 100644
--- a/core/src/main/scala/org/apache/predictionio/controller/IdentityPreparator.scala
+++ b/core/src/main/scala/org/apache/predictionio/controller/IdentityPreparator.scala
@@ -30,7 +30,7 @@ import org.apache.spark.SparkContext
   * @group Preparator
   */
 class IdentityPreparator[TD] extends BasePreparator[TD, TD] {
-  def prepareBase(sc: SparkContext, td: TD): TD = td
+  override def prepareBase(sc: SparkContext, td: TD): TD = td
 }
 
 /** Companion object of [[IdentityPreparator]] that conveniently returns an
diff --git a/core/src/main/scala/org/apache/predictionio/controller/LAlgorithm.scala b/core/src/main/scala/org/apache/predictionio/controller/LAlgorithm.scala
index 27d1d14..7a7637c 100644
--- a/core/src/main/scala/org/apache/predictionio/controller/LAlgorithm.scala
+++ b/core/src/main/scala/org/apache/predictionio/controller/LAlgorithm.scala
@@ -45,7 +45,7 @@ import scala.reflect._
 abstract class LAlgorithm[PD, M : ClassTag, Q, P]
   extends BaseAlgorithm[RDD[PD], RDD[M], Q, P] {
 
-  def trainBase(sc: SparkContext, pd: RDD[PD]): RDD[M] = pd.map(train)
+  override def trainBase(sc: SparkContext, pd: RDD[PD]): RDD[M] = pd.map(train)
 
   /** Implement this method to produce a model from prepared data.
     *
@@ -54,7 +54,7 @@ abstract class LAlgorithm[PD, M : ClassTag, Q, P]
     */
   def train(pd: PD): M
 
-  def batchPredictBase(sc: SparkContext, bm: Any, qs: RDD[(Long, Q)])
+  override def batchPredictBase(sc: SparkContext, bm: Any, qs: RDD[(Long, Q)])
   : RDD[(Long, P)] = {
     val mRDD = bm.asInstanceOf[RDD[M]]
     batchPredict(mRDD, qs)
@@ -76,7 +76,7 @@ abstract class LAlgorithm[PD, M : ClassTag, Q, P]
     }
   }
 
-  def predictBase(localBaseModel: Any, q: Q): P = {
+  override def predictBase(localBaseModel: Any, q: Q): P = {
     predict(localBaseModel.asInstanceOf[M], q)
   }
 
diff --git a/core/src/main/scala/org/apache/predictionio/controller/LAverageServing.scala b/core/src/main/scala/org/apache/predictionio/controller/LAverageServing.scala
index b51b9d1..1427fe6 100644
--- a/core/src/main/scala/org/apache/predictionio/controller/LAverageServing.scala
+++ b/core/src/main/scala/org/apache/predictionio/controller/LAverageServing.scala
@@ -27,7 +27,7 @@ import org.apache.predictionio.core.BaseAlgorithm
   */
 class LAverageServing[Q] extends LServing[Q, Double] {
   /** Returns the average of all algorithms' predictions. */
-  def serve(query: Q, predictions: Seq[Double]): Double = {
+  override def serve(query: Q, predictions: Seq[Double]): Double = {
     predictions.sum / predictions.length
   }
 }
diff --git a/core/src/main/scala/org/apache/predictionio/controller/LDataSource.scala b/core/src/main/scala/org/apache/predictionio/controller/LDataSource.scala
index fc08e60..13b92b9 100644
--- a/core/src/main/scala/org/apache/predictionio/controller/LDataSource.scala
+++ b/core/src/main/scala/org/apache/predictionio/controller/LDataSource.scala
@@ -38,14 +38,14 @@ import scala.reflect._
 abstract class LDataSource[TD: ClassTag, EI, Q, A]
   extends BaseDataSource[RDD[TD], EI, Q, A] {
 
-  def readTrainingBase(sc: SparkContext): RDD[TD] = {
+  override def readTrainingBase(sc: SparkContext): RDD[TD] = {
     sc.parallelize(Seq(None)).map(_ => readTraining())
   }
 
   /** Implement this method to only return training data from a data source */
   def readTraining(): TD
 
-  def readEvalBase(sc: SparkContext): Seq[(RDD[TD], EI, RDD[(Q, A)])] = {
+  override def readEvalBase(sc: SparkContext): Seq[(RDD[TD], EI, RDD[(Q, A)])] = {
     val localEvalData: Seq[(TD, EI, Seq[(Q, A)])] = readEval()
 
     localEvalData.map { case (td, ei, qaSeq) => {
diff --git a/core/src/main/scala/org/apache/predictionio/controller/LFirstServing.scala b/core/src/main/scala/org/apache/predictionio/controller/LFirstServing.scala
index 01e3840..e192ea7 100644
--- a/core/src/main/scala/org/apache/predictionio/controller/LFirstServing.scala
+++ b/core/src/main/scala/org/apache/predictionio/controller/LFirstServing.scala
@@ -27,7 +27,7 @@ import org.apache.predictionio.core.BaseAlgorithm
   */
 class LFirstServing[Q, P] extends LServing[Q, P] {
   /** Returns the first algorithm's prediction. */
-  def serve(query: Q, predictions: Seq[P]): P = predictions.head
+  override def serve(query: Q, predictions: Seq[P]): P = predictions.head
 }
 
 /** A concrete implementation of [[LServing]] returning the first algorithm's
diff --git a/core/src/main/scala/org/apache/predictionio/controller/LPreparator.scala b/core/src/main/scala/org/apache/predictionio/controller/LPreparator.scala
index d29a5a3..12d8d2d 100644
--- a/core/src/main/scala/org/apache/predictionio/controller/LPreparator.scala
+++ b/core/src/main/scala/org/apache/predictionio/controller/LPreparator.scala
@@ -36,7 +36,7 @@ import scala.reflect._
 abstract class LPreparator[TD, PD : ClassTag]
   extends BasePreparator[RDD[TD], RDD[PD]] {
 
-  def prepareBase(sc: SparkContext, rddTd: RDD[TD]): RDD[PD] = {
+  override def prepareBase(sc: SparkContext, rddTd: RDD[TD]): RDD[PD] = {
     rddTd.map(prepare)
   }
 
diff --git a/core/src/main/scala/org/apache/predictionio/controller/LServing.scala b/core/src/main/scala/org/apache/predictionio/controller/LServing.scala
index 4b123fe..9bd1c99 100644
--- a/core/src/main/scala/org/apache/predictionio/controller/LServing.scala
+++ b/core/src/main/scala/org/apache/predictionio/controller/LServing.scala
@@ -28,7 +28,7 @@ import org.apache.predictionio.core.BaseServing
   * @group Serving
   */
 abstract class LServing[Q, P] extends BaseServing[Q, P] {
-  def supplementBase(q: Q): Q = supplement(q)
+  override def supplementBase(q: Q): Q = supplement(q)
 
   /** :: Experimental ::
     * Implement this method to supplement the query before sending it to
@@ -40,7 +40,7 @@ abstract class LServing[Q, P] extends BaseServing[Q, P] {
   @Experimental
   def supplement(q: Q): Q = q
 
-  def serveBase(q: Q, ps: Seq[P]): P = {
+  override def serveBase(q: Q, ps: Seq[P]): P = {
     serve(q, ps)
   }
 
diff --git a/core/src/main/scala/org/apache/predictionio/controller/LocalFileSystemPersistentModel.scala b/core/src/main/scala/org/apache/predictionio/controller/LocalFileSystemPersistentModel.scala
index deb2db4..18980bd 100644
--- a/core/src/main/scala/org/apache/predictionio/controller/LocalFileSystemPersistentModel.scala
+++ b/core/src/main/scala/org/apache/predictionio/controller/LocalFileSystemPersistentModel.scala
@@ -41,7 +41,7 @@ import org.apache.spark.SparkContext
   * @group Algorithm
   */
 trait LocalFileSystemPersistentModel[AP <: Params] extends PersistentModel[AP] {
-  def save(id: String, params: AP, sc: SparkContext): Boolean = {
+  override def save(id: String, params: AP, sc: SparkContext): Boolean = {
     Utils.save(id, this)
     true
   }
@@ -59,7 +59,7 @@ trait LocalFileSystemPersistentModel[AP <: Params] extends PersistentModel[AP] {
   */
 trait LocalFileSystemPersistentModelLoader[AP <: Params, M]
   extends PersistentModelLoader[AP, M] {
-  def apply(id: String, params: AP, sc: Option[SparkContext]): M = {
+  override def apply(id: String, params: AP, sc: Option[SparkContext]): M = {
     Utils.load(id).asInstanceOf[M]
   }
 }
diff --git a/core/src/main/scala/org/apache/predictionio/controller/Metric.scala b/core/src/main/scala/org/apache/predictionio/controller/Metric.scala
index bc29092..853135c 100644
--- a/core/src/main/scala/org/apache/predictionio/controller/Metric.scala
+++ b/core/src/main/scala/org/apache/predictionio/controller/Metric.scala
@@ -103,9 +103,9 @@ abstract class AverageMetric[EI, Q, P, A]
   /** Implement this method to return a score that will be used for averaging
     * across all QPA tuples.
     */
-  def calculate(q: Q, p: P, a: A): Double
+  override def calculate(q: Q, p: P, a: A): Double
 
-  def calculate(sc: SparkContext, evalDataSet: Seq[(EI, RDD[(Q, P, A)])])
+  override def calculate(sc: SparkContext, evalDataSet: Seq[(EI, RDD[(Q, P, A)])])
   : Double = {
     calculateStats(sc, evalDataSet).mean
   }
@@ -128,9 +128,9 @@ abstract class OptionAverageMetric[EI, Q, P, A]
   /** Implement this method to return a score that will be used for averaging
     * across all QPA tuples.
     */
-  def calculate(q: Q, p: P, a: A): Option[Double]
+  override def calculate(q: Q, p: P, a: A): Option[Double]
 
-  def calculate(sc: SparkContext, evalDataSet: Seq[(EI, RDD[(Q, P, A)])])
+  override def calculate(sc: SparkContext, evalDataSet: Seq[(EI, RDD[(Q, P, A)])])
   : Double = {
     calculateStats(sc, evalDataSet).mean
   }
@@ -156,9 +156,9 @@ abstract class StdevMetric[EI, Q, P, A]
     * the stdev
     * across all QPA tuples.
     */
-  def calculate(q: Q, p: P, a: A): Double
+  override def calculate(q: Q, p: P, a: A): Double
 
-  def calculate(sc: SparkContext, evalDataSet: Seq[(EI, RDD[(Q, P, A)])])
+  override def calculate(sc: SparkContext, evalDataSet: Seq[(EI, RDD[(Q, P, A)])])
   : Double = {
     calculateStats(sc, evalDataSet).stdev
   }
@@ -184,9 +184,9 @@ abstract class OptionStdevMetric[EI, Q, P, A]
     * the stdev
     * across all QPA tuples.
     */
-  def calculate(q: Q, p: P, a: A): Option[Double]
+  override def calculate(q: Q, p: P, a: A): Option[Double]
 
-  def calculate(sc: SparkContext, evalDataSet: Seq[(EI, RDD[(Q, P, A)])])
+  override def calculate(sc: SparkContext, evalDataSet: Seq[(EI, RDD[(Q, P, A)])])
   : Double = {
     calculateStats(sc, evalDataSet).stdev
   }
@@ -208,9 +208,9 @@ abstract class SumMetric[EI, Q, P, A, R: ClassTag](implicit num: Numeric[R])
   /** Implement this method to return a score that will be used for summing
     * across all QPA tuples.
     */
-  def calculate(q: Q, p: P, a: A): R
+  override def calculate(q: Q, p: P, a: A): R
 
-  def calculate(sc: SparkContext, evalDataSet: Seq[(EI, RDD[(Q, P, A)])])
+  override def calculate(sc: SparkContext, evalDataSet: Seq[(EI, RDD[(Q, P, A)])])
   : R = {
     val union: RDD[R] = sc.union(
       evalDataSet.map { case (_, qpaRDD) =>
@@ -232,7 +232,7 @@ abstract class SumMetric[EI, Q, P, A, R: ClassTag](implicit num: Numeric[R])
   * @group Evaluation
   */
 class ZeroMetric[EI, Q, P, A] extends Metric[EI, Q, P, A, Double]() {
-   def calculate(sc: SparkContext, evalDataSet: Seq[(EI, RDD[(Q, P, A)])]): Double = 0.0
+  override def calculate(sc: SparkContext, evalDataSet: Seq[(EI, RDD[(Q, P, A)])]): Double = 0.0
 }
 
 /** Companion object of [[ZeroMetric]]
diff --git a/core/src/main/scala/org/apache/predictionio/controller/MetricEvaluator.scala b/core/src/main/scala/org/apache/predictionio/controller/MetricEvaluator.scala
index fc5ec15..17f0909 100644
--- a/core/src/main/scala/org/apache/predictionio/controller/MetricEvaluator.scala
+++ b/core/src/main/scala/org/apache/predictionio/controller/MetricEvaluator.scala
@@ -215,7 +215,7 @@ class MetricEvaluator[EI, Q, P, A, R] (
     writer.close()
   }
 
-  def evaluateBase(
+  override def evaluateBase(
     sc: SparkContext,
     evaluation: Evaluation,
     engineEvalDataSet: Seq[(EngineParams, Seq[(EI, RDD[(Q, P, A)])])],
diff --git a/core/src/main/scala/org/apache/predictionio/controller/P2LAlgorithm.scala b/core/src/main/scala/org/apache/predictionio/controller/P2LAlgorithm.scala
index c617d2c..1f59ecb 100644
--- a/core/src/main/scala/org/apache/predictionio/controller/P2LAlgorithm.scala
+++ b/core/src/main/scala/org/apache/predictionio/controller/P2LAlgorithm.scala
@@ -46,7 +46,7 @@ import scala.reflect._
 abstract class P2LAlgorithm[PD, M: ClassTag, Q: ClassTag, P]
   extends BaseAlgorithm[PD, M, Q, P] {
 
-  def trainBase(sc: SparkContext, pd: PD): M = train(sc, pd)
+  override def trainBase(sc: SparkContext, pd: PD): M = train(sc, pd)
 
   /** Implement this method to produce a model from prepared data.
     *
@@ -55,7 +55,7 @@ abstract class P2LAlgorithm[PD, M: ClassTag, Q: ClassTag, P]
     */
   def train(sc: SparkContext, pd: PD): M
 
-  def batchPredictBase(sc: SparkContext, bm: Any, qs: RDD[(Long, Q)])
+  override def batchPredictBase(sc: SparkContext, bm: Any, qs: RDD[(Long, Q)])
   : RDD[(Long, P)] = batchPredict(bm.asInstanceOf[M], qs)
 
   /** This is a default implementation to perform batch prediction. Override
@@ -70,7 +70,7 @@ abstract class P2LAlgorithm[PD, M: ClassTag, Q: ClassTag, P]
     qs.mapValues { q => predict(m, q) }
   }
 
-  def predictBase(bm: Any, q: Q): P = predict(bm.asInstanceOf[M], q)
+  override def predictBase(bm: Any, q: Q): P = predict(bm.asInstanceOf[M], q)
 
   /** Implement this method to produce a prediction from a query and trained
     * model.
diff --git a/core/src/main/scala/org/apache/predictionio/controller/PAlgorithm.scala b/core/src/main/scala/org/apache/predictionio/controller/PAlgorithm.scala
index 55f8363..7694956 100644
--- a/core/src/main/scala/org/apache/predictionio/controller/PAlgorithm.scala
+++ b/core/src/main/scala/org/apache/predictionio/controller/PAlgorithm.scala
@@ -47,7 +47,7 @@ import org.apache.spark.rdd.RDD
 abstract class PAlgorithm[PD, M, Q, P]
   extends BaseAlgorithm[PD, M, Q, P] {
 
-  def trainBase(sc: SparkContext, pd: PD): M = train(sc, pd)
+  override def trainBase(sc: SparkContext, pd: PD): M = train(sc, pd)
 
   /** Implement this method to produce a model from prepared data.
     *
@@ -56,7 +56,7 @@ abstract class PAlgorithm[PD, M, Q, P]
     */
   def train(sc: SparkContext, pd: PD): M
 
-  def batchPredictBase(sc: SparkContext, bm: Any, qs: RDD[(Long, Q)])
+  override def batchPredictBase(sc: SparkContext, bm: Any, qs: RDD[(Long, Q)])
   : RDD[(Long, P)] = batchPredict(bm.asInstanceOf[M], qs)
 
   /** To provide evaluation feature, one must override and implement this method
@@ -72,7 +72,7 @@ abstract class PAlgorithm[PD, M, Q, P]
   def batchPredict(m: M, qs: RDD[(Long, Q)]): RDD[(Long, P)] =
     throw new NotImplementedError("batchPredict not implemented")
 
-  def predictBase(baseModel: Any, query: Q): P = {
+  override def predictBase(baseModel: Any, query: Q): P = {
     predict(baseModel.asInstanceOf[M], query)
   }
 
diff --git a/core/src/main/scala/org/apache/predictionio/controller/PDataSource.scala b/core/src/main/scala/org/apache/predictionio/controller/PDataSource.scala
index cd9b853..184fa85 100644
--- a/core/src/main/scala/org/apache/predictionio/controller/PDataSource.scala
+++ b/core/src/main/scala/org/apache/predictionio/controller/PDataSource.scala
@@ -37,12 +37,12 @@ import org.apache.spark.rdd.RDD
 abstract class PDataSource[TD, EI, Q, A]
   extends BaseDataSource[TD, EI, Q, A] {
 
-  def readTrainingBase(sc: SparkContext): TD = readTraining(sc)
+  override def readTrainingBase(sc: SparkContext): TD = readTraining(sc)
 
   /** Implement this method to only return training data from a data source */
   def readTraining(sc: SparkContext): TD
 
-  def readEvalBase(sc: SparkContext): Seq[(TD, EI, RDD[(Q, A)])] = readEval(sc)
+  override def readEvalBase(sc: SparkContext): Seq[(TD, EI, RDD[(Q, A)])] = readEval(sc)
 
   /** To provide evaluation feature for your engine, your must override this
     * method to return data for evaluation from a data source. Returned data can
diff --git a/core/src/main/scala/org/apache/predictionio/controller/PPreparator.scala b/core/src/main/scala/org/apache/predictionio/controller/PPreparator.scala
index ce445b8..cec9591 100644
--- a/core/src/main/scala/org/apache/predictionio/controller/PPreparator.scala
+++ b/core/src/main/scala/org/apache/predictionio/controller/PPreparator.scala
@@ -33,7 +33,7 @@ import org.apache.spark.SparkContext
 abstract class PPreparator[TD, PD]
   extends BasePreparator[TD, PD] {
 
-  def prepareBase(sc: SparkContext, td: TD): PD = {
+  override def prepareBase(sc: SparkContext, td: TD): PD = {
     prepare(sc, td)
   }
 
diff --git a/core/src/main/scala/org/apache/predictionio/workflow/CleanupFunctions.scala b/core/src/main/scala/org/apache/predictionio/workflow/CleanupFunctions.scala
index bdd8b01..7312555 100644
--- a/core/src/main/scala/org/apache/predictionio/workflow/CleanupFunctions.scala
+++ b/core/src/main/scala/org/apache/predictionio/workflow/CleanupFunctions.scala
@@ -37,7 +37,7 @@ object CleanupFunctions {
     * CleanupFunctions.add { MyStorageClass.close }
     * }}}
     *
-    * @param anonymous function containing cleanup code.
+    * @param f function containing cleanup code.
     */
   def add(f: () => Unit): Seq[() => Unit] = {
     functions = functions :+ f
@@ -56,8 +56,6 @@ object CleanupFunctions {
     *   CleanupFunctions.run()
     * }
     * }}}
-    *
-    * @param anonymous function containing cleanup code.
     */
   def run(): Unit = {
     functions.foreach { f => f() }
diff --git a/core/src/main/scala/org/apache/predictionio/workflow/EngineServerPluginsActor.scala b/core/src/main/scala/org/apache/predictionio/workflow/EngineServerPluginsActor.scala
index 0f052ab..b313773 100644
--- a/core/src/main/scala/org/apache/predictionio/workflow/EngineServerPluginsActor.scala
+++ b/core/src/main/scala/org/apache/predictionio/workflow/EngineServerPluginsActor.scala
@@ -29,7 +29,7 @@ class PluginsActor(engineVariant: String) extends Actor {
 
   val pluginContext = EngineServerPluginContext(log, engineVariant)
 
-  def receive: PartialFunction[Any, Unit] = {
+  override def receive: PartialFunction[Any, Unit] = {
     case (ei: EngineInstance, q: JValue, p: JValue) =>
       pluginContext.outputSniffers.values.foreach(_.process(ei, q, p, pluginContext))
     case h: PluginsActor.HandleREST =>
diff --git a/core/src/main/scala/org/apache/predictionio/workflow/FakeWorkflow.scala b/core/src/main/scala/org/apache/predictionio/workflow/FakeWorkflow.scala
index 8e4db51..f9e9a1a 100644
--- a/core/src/main/scala/org/apache/predictionio/workflow/FakeWorkflow.scala
+++ b/core/src/main/scala/org/apache/predictionio/workflow/FakeWorkflow.scala
@@ -34,7 +34,7 @@ private[predictionio] class FakeEngine
 extends BaseEngine[EmptyParams, EmptyParams, EmptyParams, EmptyParams] {
   @transient lazy val logger = Logger[this.type]
 
-  def train(
+  override def train(
     sc: SparkContext,
     engineParams: EngineParams,
     engineInstanceId: String,
@@ -42,7 +42,7 @@ extends BaseEngine[EmptyParams, EmptyParams, EmptyParams, EmptyParams] {
     throw new StopAfterReadInterruption()
   }
 
-  def eval(
+  override def eval(
     sc: SparkContext,
     engineParams: EngineParams,
     params: WorkflowParams)
@@ -56,7 +56,7 @@ private[predictionio] class FakeRunner(f: (SparkContext => Unit))
     extends BaseEvaluator[EmptyParams, EmptyParams, EmptyParams, EmptyParams,
       FakeEvalResult] {
   @transient private lazy val logger = Logger[this.type]
-  def evaluateBase(
+  override def evaluateBase(
     sc: SparkContext,
     evaluation: Evaluation,
     engineEvalDataSet:
