zhreshold commented on a change in pull request #8005: add warmup lr_scheduler
URL: https://github.com/apache/incubator-mxnet/pull/8005#discussion_r141416042
##########
File path: python/mxnet/lr_scheduler.py
##########
@@ -136,3 +136,80 @@ def __call__(self, num_update):
else:
return self.base_lr
return self.base_lr
+
+class GradualWarmupWithMultiFactorScheduler(LRScheduler):
+ def __init__(self, warmup_step = 100, begin_lr=0.01, stop_lr=0.1,
multifactor_step = 100, factor = 1):
+ """Warmup the learning rate by a const value for first 'warmup_step'
steps.
+
+ It returns a new learning rate by::
+
+ begin_lr + num_update * const_update
+
+    Then reduces the learning rate at the given list of steps, as in
MultiFactorScheduler.
+
+ Parameters
+ ----------
+ warmup_step : int
+ Changes the learning rate for first 'warmup_step' updates.
+ begin_lr : float, optional
+        The learning rate at the beginning.
+ stop_lr : float, optional
+ Stop updating the learning rate if it is less than this value.
+ multifactor_step: list of int
+ The list of steps to schedule a change
+ factor: float
+ The factor to change the learning rate.
+ """
+ super(GradualWarmupWithMultiFactorScheduler, self).__init__()
+ if stop_lr <= begin_lr:
+ raise ValueError("stop_lr must larger than begin_lr")
+ assert isinstance(multifactor_step, list) and len(multifactor_step) >=
1
Review comment:
The default `multifactor_step=100` is an int, so the
`isinstance(multifactor_step, list)` assertion will fail with the default
arguments — cast it to a list first.
----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on GitHub and use the
URL above to go to the specific comment.
For queries about this service, please contact Infrastructure at:
[email protected]
With regards,
Apache Git Services