This is an automated email from the ASF dual-hosted git repository.

okislal pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/madlib.git

commit 64ed9bb3af60fd75f5a21c1f08770f6ca08d2103
Author: Nikhil Kak <[email protected]>
AuthorDate: Fri Apr 30 17:38:38 2021 -0700

    MLP: Set lambda value for minibatch
    
    Previously in our MLP minibatch code, we were not setting the lambda value
    correctly. This meant that in the function `getLossAndUpdateModel`, we would
    always use the default value of 0 even if the user passed in a non-zero
    value.
    
    This commit fixes it by setting the lambda value before calling the
    `getLossAndUpdateModel` function.
    
    Co-authored-by: Ekta Khanna <[email protected]>
---
 src/modules/convex/algo/igd.hpp | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/src/modules/convex/algo/igd.hpp b/src/modules/convex/algo/igd.hpp
index 3ffe27c..ac3c498 100644
--- a/src/modules/convex/algo/igd.hpp
+++ b/src/modules/convex/algo/igd.hpp
@@ -146,6 +146,7 @@ IGD<State, ConstState, Task>::merge(state_type &state,
                 Y_batch = tuple.depVar.block(curr_batch_row_index, 0,
                                              batch_size, tuple.depVar.cols());
             }
+            Task::lambda = state.lambda;
             loss += Task::getLossAndUpdateModel(
                 state.model, X_batch, Y_batch, state.stepsize);
         }
@@ -225,6 +226,7 @@ IGD<State, ConstState, Task>::merge(state_type &state,
                                              batch_size, tuple.depVar.cols());
             }
             t++;
+            Task::lambda = state.lambda;
             loss += Task::getLossAndUpdateModelALR(state.model, X_batch, 
Y_batch,
                                                 state.stepsize, state.opt_code,
                                                 state.rho, m, state.beta1,

Reply via email to