thirdwing commented on issue #3368: custom loss symbol in R/Python
URL: 
https://github.com/apache/incubator-mxnet/issues/3368#issuecomment-320108394
 
 
   @khalida I have updated the document using your example.
   
   ```r
   # Network / training configuration ----
   optimizer <- "rmsprop"       # optimizer name passed to mx.model.FeedForward.create
   batchSize <- 60              # mini-batch size (array.batch.size)
   nRounds <- 50                # number of training epochs (num.round)
   nHidden <- 14                # units in the single hidden layer
   verbose <- FALSE             # suppress per-epoch training log
   array.layout <- "rowmajor"   # samples are rows of train.x / test.x
   
   library(mxnet)

   # Boston housing data from mlbench: 506 rows, 13 predictors plus the
   # target medv in column 14.
   data(BostonHousing, package="mlbench")

   # Convert every factor column (here only `chas`) to numeric. Computing the
   # factor mask once avoids the duplicated sapply() of the original, and the
   # lapply() form also works if more than one column is a factor (the old
   # vector assignment only handled exactly one).
   is_factor <- sapply(BostonHousing, is.factor)
   BostonHousing[is_factor] <- lapply(BostonHousing[is_factor],
                                      function(col) as.numeric(as.character(col)))

   # Standardise all columns to zero mean / unit variance.
   BostonHousing <- data.frame(scale(BostonHousing))
   
   # Hold out every 5th row for testing; medv (column 14) is the target.
   # Deriving the sequence from nrow() instead of hard-coding 506 keeps the
   # split correct if the data ever changes.
   test.ind <- seq(1, nrow(BostonHousing), by = 5)
   train.x <- data.matrix(BostonHousing[-test.ind, -14])
   train.y <- BostonHousing[-test.ind, 14]
   # The original used `--test.ind` (double negation); plain `test.ind`
   # selects exactly the same rows and is far clearer.
   test.x <- data.matrix(BostonHousing[test.ind, -14])
   test.y <- BostonHousing[test.ind, 14]

   # Symbolic network: 13 inputs -> tanh hidden layer (nHidden units) ->
   # 1 linear output, trained with the built-in squared-error regression head.
   data <- mx.symbol.Variable("data")
   label <- mx.symbol.Variable("label")
   fc1 <- mx.symbol.FullyConnected(data, num_hidden=nHidden, name="fc1")
   tanh1 <- mx.symbol.Activation(fc1, act_type="tanh", name="tanh1")
   fc2 <- mx.symbol.FullyConnected(tanh1, num_hidden=1, name="fc2")
   lro <- mx.symbol.LinearRegressionOutput(fc2, name="lro")
   
   # Train the reference model with the built-in LinearRegressionOutput head,
   # reporting RMSE on the held-out rows each round.
   mx.set.seed(0)  # fix RNG so both models start from identical weights
   model <- mx.model.FeedForward.create(lro,
                                        X=train.x, y=train.y,
                                        eval.data=list(data=test.x, 
label=test.y),
                                        ctx=mx.cpu(), num.round=nRounds,
                                        array.batch.size=batchSize,
                                        eval.metric=mx.metric.rmse,
                                        optimizer=optimizer, verbose=verbose,
                                        array.layout=array.layout)
   
   # Predictions on the held-out rows; predict() returns a 1 x n matrix.
   pred <- predict(model, test.x)
   
   # Same network, but with an explicit squared-error loss built via MakeLoss.
   # Reshape(fc2, shape = 0) flattens fc2's output so it lines up with
   # `label` before subtracting.
   lro2 <- mx.symbol.MakeLoss(mx.symbol.square(mx.symbol.Reshape(fc2, shape = 
0) - label), name="lro2")
   
   # Retrain from the same seed so the two runs are comparable.
   # NOTE(review): eval.metric is passed here too, but with MakeLoss the
   # symbol's forward output is not the prediction, so the reported metric
   # values are not meaningful during training (see the discussion below).
   mx.set.seed(0)
   model2 <- mx.model.FeedForward.create(lro2,
                                        X=train.x, y=train.y,
                                        eval.data=list(data=test.x, 
label=test.y),
                                        ctx=mx.cpu(), num.round=nRounds,
                                        array.batch.size=batchSize,
                                        eval.metric=mx.metric.rmse,
                                        optimizer=optimizer, verbose=verbose,
                                        array.layout=array.layout)
   
   
   # MakeLoss makes the *loss* the symbol's output, so predict(model2, ...)
   # would not return predictions. Recover them by extracting the internal
   # fc2 output and wrapping it in a new feed-forward model that shares
   # model2's trained weights. (Also switched `=` to the conventional `<-`.)
   internals <- internals(model2$symbol)
   fc_symbol <- internals[[match("fc2_output", outputs(internals))]]

   model3 <- list(symbol = fc_symbol,
                  arg.params = model2$arg.params,
                  aux.params = model2$aux.params)

   # Tag the list so predict() dispatches to the mxnet S3 method.
   class(model3) <- "MXFeedForwardModel"

   pred3 <- predict(model3, test.x)
   
   # Plot predicted vs. actual for both models, side by side.
   par(mfrow=c(1,2))

   # Both prediction vectors were computed on test.x, so these are test-set
   # fits (the original titles said "Train Fit", which was a mislabel).
   plot(test.y, pred[1,], main="nnet Test Fit", xlab="Target", ylab="Response")
   abline(0,1, col="red", lwd=2)   # y = x reference line: perfect prediction

   plot(test.y, pred3[1,], main="nnet MakeLoss square Test Fit", 
xlab="Target",  ylab="Response")
   abline(0,1, col="red", lwd=2)
   ```
   
   
![res](https://user-images.githubusercontent.com/1547093/28946777-2dfa21f8-7861-11e7-9da7-96e4525b8142.png)
   
   The output of `mx.symbol.MakeLoss` is the gradient of loss with respect to 
the input data.
   
   So currently the metric doesn't work with `MakeLoss` during the training 
process.
 
----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
[email protected]


With regards,
Apache Git Services

Reply via email to