[ https://issues.apache.org/jira/browse/FLINK-2157?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=14618488#comment-14618488 ]

ASF GitHub Bot commented on FLINK-2157:
---------------------------------------

Github user thvasilo commented on a diff in the pull request:

    https://github.com/apache/flink/pull/871#discussion_r34139797
  
    --- Diff: flink-staging/flink-ml/src/test/scala/org/apache/flink/ml/evaluation/ScoreTest.scala ---
    @@ -0,0 +1,118 @@
    +/*
    + * Licensed to the Apache Software Foundation (ASF) under one
    + * or more contributor license agreements.  See the NOTICE file
    + * distributed with this work for additional information
    + * regarding copyright ownership.  The ASF licenses this file
    + * to you under the Apache License, Version 2.0 (the
    + * "License"); you may not use this file except in compliance
    + * with the License.  You may obtain a copy of the License at
    + *
    + *     http://www.apache.org/licenses/LICENSE-2.0
    + *
    + * Unless required by applicable law or agreed to in writing, software
    + * distributed under the License is distributed on an "AS IS" BASIS,
    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    + * See the License for the specific language governing permissions and
    + * limitations under the License.
    + */
    +
    +
    +package org.apache.flink.ml.evaluation
    +
    +import org.apache.flink.api.scala._
    +import org.apache.flink.test.util.FlinkTestBase
    +import org.scalatest.{FlatSpec, Matchers}
    +
    +
    +class ScoreTest
    +  extends FlatSpec
    +  with Matchers
    +  with FlinkTestBase {
    +
    +  behavior of "Evaluation Score functions"
    +
    +  it should "work for squared loss" in {
    +    val env = ExecutionEnvironment.getExecutionEnvironment
    +
    +    val yy = env.fromCollection(Seq((0.0, 1.0), (0.0, 0.0), (3.0, 5.0)))
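    +    // Mean squared loss over these pairs: (1 + 0 + 4) / 3 = 5/3, i.e. about 1.6667, checked below.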
    +
    +    val loss = RegressionScores.squaredLoss
    +
    +    val result = loss.evaluate(yy).collect()
    +
    +    result.length shouldBe 1
    +    result.head shouldBe (1.6666666666 +- 1e-4)
    +  }
    +
    +  it should "work for zero one loss" in {
    +    val env = ExecutionEnvironment.getExecutionEnvironment
    +
    +    val yy = env.fromCollection(Seq("a" -> "a", "a" -> "b", "b" -> "c", 
"d" -> "d"))
    +
    +    val loss = ClassificationScores.zeroOneLoss[String]
    +
    +    val result = loss.evaluate(yy).collect()
    +
    +    result.length shouldBe 1
    +    result.head shouldBe (0.5 +- 1e-9)
    +  }
    +
    +  it should "work for zero one loss applied to signs" in {
    +    val env = ExecutionEnvironment.getExecutionEnvironment
    +
    +    val yy = env.fromCollection(Seq[(Double,Double)](
    +      -2.3 -> 2.3, -1.0 -> -10.5, 2.0 -> 3.0, 4.0 -> -5.0))
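    +    // The signs agree only for the second and third pairs, so the expected loss is 2/4 = 0.5.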
    +
    +    val loss = RegressionScores.zeroOneSignumLoss
    +
    +    val result = loss.evaluate(yy).collect()
    +
    +    result.length shouldBe 1
    +    result.head shouldBe (0.5 +- 1e-9)
    +  }
    +
    +  it should "work for accuracy score" in {
    +    val env = ExecutionEnvironment.getExecutionEnvironment
    +
    +    val yy = env.fromCollection(Seq(0.0 -> 0.0, 1.0 -> 1.0, 2.0 -> 2.0, 3.0 -> 2.0))
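    +    // Three of the four pairs match exactly, so the expected accuracy is 3/4 = 0.75.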
    +
    +    val accuracyScore = ClassificationScores.accuracyScore
    +
    +    val result = accuracyScore.evaluate(yy).collect()
    +
    +    result.length shouldBe 1
    +    result.head shouldBe (0.75 +- 1e-9)
    +  }
    +
    +  it should "calculate the R2 score correctly" in {
    +    val env = ExecutionEnvironment.getExecutionEnvironment
    +
    +    // List of 50 (i, i + 1.0) tuples, where i is the index
    +    val valueList = Range.Double(0.0, 50.0, 1.0).toList zip Range.Double(0.0, 50.0, 1.0).map(_ + 1)
    --- End diff ---
    
    Nope, will remove.


> Create evaluation framework for ML library
> ------------------------------------------
>
>                 Key: FLINK-2157
>                 URL: https://issues.apache.org/jira/browse/FLINK-2157
>             Project: Flink
>          Issue Type: New Feature
>          Components: Machine Learning Library
>            Reporter: Till Rohrmann
>            Assignee: Theodore Vasiloudis
>              Labels: ML
>             Fix For: 0.10
>
>
> Currently, FlinkML lacks the means to evaluate the performance of trained models. 
> It would be great to add some {{Evaluators}} which can calculate a score 
> based on the information about the true and predicted labels. This could also be 
> used for cross validation to choose the right hyperparameters.
> Possible scores could be the F score [1], the zero-one loss, etc.
> Resources
> [1] [http://en.wikipedia.org/wiki/F1_score]
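
For reference, a rough sketch of the shape such a score could take, going by how the
tests in the diff above use it: a DataSet of (truth, prediction) pairs is reduced to a
single averaged value. The names Score and ExampleScores below are illustrative
placeholders, not necessarily the definitions used in the pull request:

    import org.apache.flink.api.scala._

    // Reduces a DataSet of (truth, prediction) pairs to the mean of a pairwise score.
    class Score[T](scoringFunction: (T, T) => Double) extends Serializable {
      def evaluate(trueAndPredicted: DataSet[(T, T)]): DataSet[Double] = {
        trueAndPredicted
          .map { pair => (scoringFunction(pair._1, pair._2), 1L) }
          .reduce { (left, right) => (left._1 + right._1, left._2 + right._2) }
          .map { sumAndCount => sumAndCount._1 / sumAndCount._2 }
      }
    }

    object ExampleScores {
      // Zero-one loss: fraction of pairs where the prediction differs from the truth.
      def zeroOneLoss[T]: Score[T] =
        new Score[T]((truth, prediction) => if (truth == prediction) 0.0 else 1.0)

      // Squared loss: mean of (prediction - truth)^2 over all pairs.
      val squaredLoss: Score[Double] =
        new Score[Double]((truth, prediction) => (prediction - truth) * (prediction - truth))
    }

With this shape, evaluate(yy).collect().head yields a single Double, which is how the
tests in the diff read their results.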



