diff --git a/python/paddle/fluid/tests/unittests/test_lr_scheduler.py b/python/paddle/fluid/tests/unittests/test_lr_scheduler.py
index ee8e5834967cc..96a818549e700 100644
--- a/python/paddle/fluid/tests/unittests/test_lr_scheduler.py
+++ b/python/paddle/fluid/tests/unittests/test_lr_scheduler.py
@@ -325,7 +325,7 @@ def one_cycle_lr(epoch_num,
                  max_learning_rate,
                  total_steps,
                  divide_factor=25,
-                 end_lr=0.0001,
+                 end_learning_rate=0.0001,
                  phase_pct=0.3,
                  anneal_strategy='cos',
                  three_phase=False,
@@ -347,7 +347,7 @@ def one_cycle_lr(epoch_num,
             },
             {
                 'start_lr': initial_lr,
-                'end_lr': end_lr,
+                'end_lr': end_learning_rate,
             },
         ]
     else:
@@ -359,7 +359,7 @@ def one_cycle_lr(epoch_num,
             },
             {
                 'start_lr': max_learning_rate,
-                'end_lr': end_lr,
+                'end_lr': end_learning_rate,
             },
         ]
 
@@ -539,10 +539,10 @@ def test_scheduler(self):
                 max_learning_rate=-1.5, total_steps=20)
         with self.assertRaises(TypeError):
             paddle.optimizer.lr.OneCycleLR(
-                max_learning_rate=0.1, total_steps=20, end_lr='test')
+                max_learning_rate=0.1, total_steps=20, end_learning_rate='test')
         with self.assertRaises(ValueError):
             paddle.optimizer.lr.OneCycleLR(
-                max_learning_rate=0.1, total_steps=20, end_lr=-1)
+                max_learning_rate=0.1, total_steps=20, end_learning_rate=-1)
         with self.assertRaises(TypeError):
             paddle.optimizer.lr.OneCycleLR(
                 max_learning_rate=0.1, total_steps='test')
@@ -622,7 +622,7 @@ def test_scheduler(self):
             "max_learning_rate": 0.1,
             "total_steps": 20,
             "divide_factor": 5,
-            "end_lr": 0.0001,
+            "end_learning_rate": 0.0001,
             "anneal_strategy": 'cos',
             "phase_pct": 0.3,
             "three_phase": False,
@@ -630,7 +630,7 @@ def test_scheduler(self):
             "max_learning_rate": 0.5,
             "total_steps": 20,
             "divide_factor": 10,
-            "end_lr": 0.001,
+            "end_learning_rate": 0.001,
             "anneal_strategy": 'linear',
             "phase_pct": 0.4,
             "three_phase": False,
@@ -638,7 +638,7 @@ def test_scheduler(self):
             "max_learning_rate": 1.0,
             "total_steps": 20,
             "divide_factor": 9,
-            "end_lr": 0.0001,
+            "end_learning_rate": 0.0001,
             "anneal_strategy": 'cos',
             "phase_pct": 0.3,
             "three_phase": True,
@@ -646,7 +646,7 @@ def test_scheduler(self):
             "max_learning_rate": 0.3,
             "total_steps": 20,
             "divide_factor": 25,
-            "end_lr": 0.0005,
+            "end_learning_rate": 0.0005,
             "anneal_strategy": 'linear',
             "phase_pct": 0.2,
             "three_phase": True,
diff --git a/python/paddle/optimizer/lr.py b/python/paddle/optimizer/lr.py
index c3de4b781174b..12b8272707bd8 100644
--- a/python/paddle/optimizer/lr.py
+++ b/python/paddle/optimizer/lr.py
@@ -1613,7 +1613,7 @@ class OneCycleLR(LRScheduler):
             Functionally, it defines the initial learning rate by ``divide_factor`` .
         total_steps (int): Number of total training steps.
         divide_factor (float): Initial learning rate will be determined by initial_learning_rate = max_learning_rate / divide_factor. Default: 25.
-        end_lr (float, optional): The minimum learning rate during training, it should be much less than initial learning rate.
+        end_learning_rate (float, optional): The minimum learning rate during training; it should be much less than the initial learning rate. Default: 0.0001.
         phase_pct (float): The percentage of total steps which used to increasing learning rate. Default: 0.3.
         anneal_strategy (str, optional): Strategy of adjusting learning rate.'cos' for cosine annealing, 'linear' for linear annealing. Default: 'cos'.
@@ -1682,7 +1682,7 @@ def __init__(self,
                  max_learning_rate,
                  total_steps,
                  divide_factor=25.,
-                 end_lr=0.0001,
+                 end_learning_rate=0.0001,
                  phase_pct=0.3,
                  anneal_strategy='cos',
                  three_phase=False,
@@ -1696,13 +1696,13 @@ def __init__(self,
         if max_learning_rate < 0:
             raise ValueError("'max_learning_rate' must be a positive integer.")
 
-        # Check type and value of end_lr
-        if not isinstance(end_lr, (float, int)):
+        # Check type and value of end_learning_rate
+        if not isinstance(end_learning_rate, (float, int)):
             raise TypeError(
-                "'end_lr' must be 'float' or 'int', but received {}".format(
-                    type(total_steps)))
-        if end_lr < 0:
-            raise ValueError("'end_lr' must be a positive integer.")
+                "'end_learning_rate' must be 'float' or 'int', but received {}".
+                format(type(end_learning_rate)))
+        if end_learning_rate < 0:
+            raise ValueError("'end_learning_rate' must be a positive number.")
 
         # Check type and value of total_steps
         if not isinstance(total_steps, int):
@@ -1728,7 +1728,7 @@ def __init__(self,
                 format(type(divide_factor)))
 
         initial_lr = max_learning_rate / float(divide_factor)
-        min_lr = float(end_lr)
+        min_lr = float(end_learning_rate)
 
         if three_phase:
             if phase_pct >= 0.5:
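
For reference, a minimal usage sketch of the scheduler after the rename. It is
not part of the patch: the hyperparameter values are arbitrary, and the training
loop is only the usual paddle dygraph pattern, shown here to illustrate where
the renamed keyword appears:

    import paddle

    linear = paddle.nn.Linear(10, 10)

    # `end_learning_rate` is the keyword introduced by this change
    # (formerly `end_lr`); all values below are illustrative defaults.
    scheduler = paddle.optimizer.lr.OneCycleLR(
        max_learning_rate=0.1,
        total_steps=20,
        divide_factor=25,
        end_learning_rate=0.0001,
        phase_pct=0.3,
        anneal_strategy='cos',
        three_phase=False)
    sgd = paddle.optimizer.SGD(learning_rate=scheduler,
                               parameters=linear.parameters())

    for step in range(20):
        x = paddle.uniform([4, 10])
        loss = paddle.mean(linear(x))
        loss.backward()
        sgd.step()
        sgd.clear_grad()
        scheduler.step()  # advance the one-cycle schedule once per step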