remove duplicated precision

Project: http://git-wip-us.apache.org/repos/asf/incubator-singa/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-singa/commit/32069049
Tree: http://git-wip-us.apache.org/repos/asf/incubator-singa/tree/32069049
Diff: http://git-wip-us.apache.org/repos/asf/incubator-singa/diff/32069049

Branch: refs/heads/master
Commit: 320690499c4ff3b0ca27edbe0962cca2b386f0fd
Parents: f7caf46
Author: RUAN0007 <[email protected]>
Authored: Tue Mar 7 12:07:50 2017 +0800
Committer: RUAN0007 <[email protected]>
Committed: Tue Mar 7 12:07:50 2017 +0800

----------------------------------------------------------------------
 python/singa/metric.py | 65 +++------------------------------------------
 1 file changed, 3 insertions(+), 62 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/32069049/python/singa/metric.py
----------------------------------------------------------------------
diff --git a/python/singa/metric.py b/python/singa/metric.py
index 6ddcd27..64a1b72 100644
--- a/python/singa/metric.py
+++ b/python/singa/metric.py
@@ -87,65 +87,6 @@ class Accuracy(Metric):
         self.swig_metric = singa.Accuracy()
 
 
-class Precision(Metric):
-    '''Make the top-k labels of max probability as the prediction
-
-    Compute the precision against the groundtruth labels
-    '''
-    def __init__(self, top_k):
-        self.top_k = top_k
-
-
-
-    def forward(self, x, y):
-        '''Compute the precision for each sample.
-
-        Convert tensor to numpy for computation
-
-        Args:
-            x (Tensor): predictions, one row per sample
-            y (Tensor): ground truth labels, one row per sample
-
-        Returns:
-            a tensor of floats, one per sample
-        '''
-
-        dev = x.device
-        x.to_host()
-        y.to_host()
-
-        x_np = tensor.to_numpy(x)
-        y_np = tensor.to_numpy(y)
-
-        pred_np = np.argsort(-x_np)[:,0:self.top_k] #Sort in descending order
-
-        tmp_np = np.zeros(pred_np.shape, dtype=np.float32)
-
-        for i in range(pred_np.shape[0]):
-            tmp_np[i] = y_np[i,pred_np[i]]
-
-        prcs_np = np.average(tmp_np, axis=1)
-
-        prcs = tensor.from_numpy(prcs_np)
-
-        x.to_device(dev)
-        y.to_device(dev)
-        prcs.to_device(dev)
-
-        return prcs
-
-
-    def evaluate(self, x, y):
-        '''Compute the averaged precision over all samples.
-
-        Args:
-            x (Tensor): predictions, one row per sample
-            y (Tensor): ground truth values, one row per sample
-        Returns:
-            a float value for the averaged metric
-        '''
-
-        return tensor.average(self.forward(x,y))
 
 
 class Precision(Metric):
@@ -178,7 +119,7 @@ class Precision(Metric):
         x_np = tensor.to_numpy(x)
         y_np = tensor.to_numpy(y)
 
-        pred_np = np.argsort(-x_np)[:,0:self.top_k] #Sort in descending order
+        pred_np = np.argsort(-x_np)[:, 0:self.top_k] #Sort in descending order
 
         prcs_np = np.zeros(pred_np.shape[0], dtype=np.float32)
 
@@ -209,7 +150,7 @@ class Precision(Metric):
             a float value for the averaged metric
         '''
 
-        return tensor.average(self.forward(x,y))
+        return tensor.average(self.forward(x, y))
 
 
 class Recall(Metric):
@@ -241,7 +182,7 @@ class Recall(Metric):
         x_np = tensor.to_numpy(x)
         y_np = tensor.to_numpy(y)
 
-        pred_np = np.argsort(-x_np)[:,0:self.top_k] #Sort in descending order
+        pred_np = np.argsort(-x_np)[:, 0:self.top_k] #Sort in descending order
 
         recall_np = np.zeros(pred_np.shape[0], dtype=np.float32)
 

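For reference, below is a minimal standalone sketch of the top-k precision computation that the retained Precision class performs, written with plain numpy. It assumes a multi-label setting where y is a binary indicator matrix with one row per sample; the helper name topk_precision is hypothetical and is not part of singa.metric.

    import numpy as np

    def topk_precision(scores, labels, top_k):
        # scores: (n_samples, n_classes) prediction scores
        # labels: (n_samples, n_classes) binary ground-truth indicators
        # Argsort of the negated scores gives the class indices in
        # descending order of score, as in the diff above.
        pred = np.argsort(-scores)[:, 0:top_k]
        prcs = np.zeros(pred.shape[0], dtype=np.float32)
        for i in range(pred.shape[0]):
            # Fraction of the k predicted labels that are true positives.
            prcs[i] = labels[i, pred[i]].sum() / float(top_k)
        return prcs

    # Example: 2 samples, 4 classes, top-2 predictions
    scores = np.array([[0.1, 0.7, 0.2, 0.0],
                       [0.5, 0.1, 0.3, 0.1]], dtype=np.float32)
    labels = np.array([[0, 1, 1, 0],
                       [1, 0, 0, 1]], dtype=np.float32)
    print(topk_precision(scores, labels, top_k=2))  # -> [1.0, 0.5]

The first sample's two highest-scoring classes are both in the ground truth (precision 1.0); only one of the second sample's top-2 predictions is correct (precision 0.5).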