Hello,
I am trying to build a triplet loss layer in Theano, implementing both its
forward and backward pass. I have attached my code below. Could anyone
help me check whether the implemented gradients are correct?
import theano
import theano.tensor as T
import numpy as np
class triplet_loss_layer(theano.Op):
    """Theano Op computing the triplet (hinge) loss over a mini-batch.

    Given anchor ``x``, positive ``y`` and negative ``z`` (each an N x D
    double matrix), the forward pass computes

        loss = mean_i( max(||x_i - y_i||^2 - ||x_i - z_i||^2 + alpha, 0) )

    i.e. the FaceNet-style triplet loss with margin ``alpha``.
    """

    # alpha must be part of __props__ so that two Ops with different
    # margins do not compare/hash equal (Theano uses __props__ for
    # __eq__/__hash__ and graph caching).
    __props__ = ('alpha',)

    # Declarative input/output signatures: three double matrices in,
    # one double scalar out.
    itypes = [T.dmatrix, T.dmatrix, T.dmatrix]
    otypes = [T.dscalar]

    def __init__(self, alpha):
        # alpha: margin enforced between the positive and negative pair.
        self.alpha = alpha
        super(triplet_loss_layer, self).__init__()

    def perform(self, node, inputs, output_storage):
        """Forward pass: numpy computation of the mean hinged triplet loss."""
        x, y, z = inputs
        out = output_storage[0]
        dis1 = ((x - y) ** 2).sum(axis=1)   # squared anchor-positive distances
        dis2 = ((x - z) ** 2).sum(axis=1)   # squared anchor-negative distances
        s = np.maximum(dis1 - dis2 + self.alpha, 0.0)  # per-triplet hinge
        out[0] = np.mean(s)

    def infer_shape(self, node, i0_shapes):
        # The output is a scalar regardless of input shapes.
        return [()]

    def grad(self, inputs, output_grads):
        """Backward pass: symbolic gradients w.r.t. anchor, positive, negative.

        The max(., 0) hinge means rows where
        ``||x-y||^2 - ||x-z||^2 + alpha <= 0`` contribute ZERO gradient;
        the original code was missing this mask, which made the gradients
        incorrect for already-satisfied triplets.
        """
        x, y, z = inputs
        gz = output_grads[0]  # scalar gradient flowing in from the cost

        # Cast N to float explicitly: 2 / int_scalar could truncate to an
        # integer division (zero gradient) under old-division semantics.
        N = T.cast(x.shape[0], 'float64')

        # Recompute the per-row hinge argument symbolically.
        s = ((x - y) ** 2).sum(axis=1) - ((x - z) ** 2).sum(axis=1) + self.alpha

        # active: (N, 1) indicator broadcast over the feature dimension,
        # 1.0 where the hinge is active, 0.0 where the loss is clipped at 0.
        active = T.cast(T.gt(s, 0.0), 'float64').dimshuffle(0, 'x')

        # d loss / dx = (2/N) * [s>0] * ((x-y) - (x-z)) = (2/N) * [s>0] * (z-y)
        grad_x = (2.0 / N) * active * (z - y) * gz
        # d loss / dy = (2/N) * [s>0] * (y - x)
        grad_y = (2.0 / N) * active * (y - x) * gz
        # d loss / dz = (2/N) * [s>0] * (x - z)
        grad_z = (2.0 / N) * active * (x - z) * gz
        return grad_x, grad_y, grad_z
--
---
You received this message because you are subscribed to the Google Groups
"theano-users" group.
To unsubscribe from this group and stop receiving emails from it, send an email
to [email protected].
For more options, visit https://groups.google.com/d/optout.