Skip to content

Commit

Permalink
Fix multi tensor momentum regularization bug (PaddlePaddle#38344)
Browse files Browse the repository at this point in the history
* fix merged_momentum regularization bug

* fix bug
  • Loading branch information
zhangbo9674 authored and zmxdream committed Dec 25, 2021
1 parent 41c0f48 commit 4c9d12d
Showing 1 changed file with 3 additions and 3 deletions.
6 changes: 3 additions & 3 deletions python/paddle/optimizer/momentum.py
Original file line number Diff line number Diff line change
Expand Up @@ -192,7 +192,7 @@ def __init__(self,

def _update_regularization(self, weight_decay):
reg_method = ""
reg_coeff = 0
reg_coeff = 0.0

if (isinstance(weight_decay, L2DecayRegularizer)):
reg_method = "l2_decay"
Expand Down Expand Up @@ -306,7 +306,7 @@ def _append_optimize_op(self, block, param_and_grad):
# the param's regularization has been done before, we avoid do l2decay in momentum.
elif param.regularizer is not None:
regularization_method = ""
regularization_coeff = 0
regularization_coeff = 0.0

find_master = self._multi_precision and param_and_grad[
0].dtype == core.VarDesc.VarType.FP16
Expand Down Expand Up @@ -380,7 +380,7 @@ def _multi_tensor_init(self, target_block, parameters):
if isinstance(param.regularizer, L2DecayRegularizer):
regularization_method = "l2_decay"
regularization_coeff = param.regularizer._regularization_coeff
else:
elif param.regularizer is not None:
regularization_method = ""
regularization_coeff = 0.0
if param.dtype == paddle.float32:
Expand Down

0 comments on commit 4c9d12d

Please sign in to comment.