Merge pull request PaddlePaddle#118
fix _C_ops usage to follow the PaddlePaddle upgrade (the legacy dygraph ops moved to paddle._legacy_C_ops)
GuoxiaWang committed Aug 29, 2022
2 parents 55bc4f9 + ef726a5, commit 466d877
Showing 4 changed files with 8 additions and 5 deletions.
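All four files get the same fix: recent PaddlePaddle releases moved the old dygraph operators from paddle._C_ops to paddle._legacy_C_ops, so the direct paddle._C_ops.* calls below no longer resolve to the intended kernels. The patch imports the legacy module under the old alias, leaving every call site unchanged. A minimal sketch of a version-tolerant variant (the try/except fallback is an assumption for pre-upgrade Paddle, not part of this commit):

    try:
        # newer Paddle: the legacy dygraph ops live in _legacy_C_ops
        from paddle import _legacy_C_ops as _C_ops
    except ImportError:
        # older Paddle: the same ops are still exposed as paddle._C_ops
        from paddle import _C_ops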
plsc/core/grad_clip.py: 3 changes (2 additions & 1 deletion)
@@ -14,6 +14,7 @@
 
 import warnings
 import paddle
+from paddle import _legacy_C_ops as _C_ops


def _squared_l2_norm(x):
@@ -22,7 +23,7 @@ def _squared_l2_norm(x):
         sum_square = paddle.sum(square)
         return sum_square
 
-    return paddle._C_ops.squared_l2_norm(x)
+    return _C_ops.squared_l2_norm(x)
 
 
 class ClipGradByGlobalNorm(object):
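The two return paths above compute the same quantity: _C_ops.squared_l2_norm fuses square-and-sum into one kernel and returns a single-element tensor. A small equivalence sketch (assuming a Paddle build that ships _legacy_C_ops; square in the hunk above is presumably paddle.square(x)):

    import paddle
    from paddle import _legacy_C_ops as _C_ops

    x = paddle.randn([4, 8])
    fused = _C_ops.squared_l2_norm(x)      # fused kernel
    eager = paddle.sum(paddle.square(x))   # eager fallback path shown above
    # both hold sum(x_i ** 2), the squared L2 norm of the flattened tensor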
plsc/core/grad_scaler.py: 2 changes (1 addition & 1 deletion)
@@ -15,7 +15,7 @@
 from collections import defaultdict
 from paddle.amp import GradScaler as FrameworkGradScaler
 from paddle.fluid.dygraph.amp import OptimizerState
-from paddle import _C_ops
+from paddle import _legacy_C_ops as _C_ops
 import paddle
 
 
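Only the import line changes here; binding the legacy module under the old name keeps the rest of grad_scaler.py untouched. The rebinding is a plain name alias:

    import paddle
    from paddle import _legacy_C_ops as _C_ops

    # _C_ops is now just another name for the legacy module, so every
    # existing _C_ops.* call site dispatches to the pre-upgrade kernels
    assert _C_ops is paddle._legacy_C_ops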
plsc/optimizer/adamw.py: 3 changes (2 additions & 1 deletion)
@@ -18,6 +18,7 @@
 
 import math
 import paddle
+from paddle import _legacy_C_ops as _C_ops
 from .optimizer import Optimizer
 from plsc.utils import logger
 
@@ -100,7 +101,7 @@ def step(self):
                     paddle.float16, paddle.bfloat16
                 }:
                     master_param = state['master_param']
-                _, _, _, _, _, _ = paddle._C_ops.adamw(
+                _, _, _, _, _, _ = _C_ops.adamw(
                     p, grad,
                     paddle.to_tensor(lr), exp_avg, exp_avg_sq, beta1_pow,
                     beta2_pow, master_param, p, exp_avg, exp_avg_sq, beta1_pow,
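The six discarded return values mirror the legacy calling convention of passing the updated tensors back in as outputs: p, exp_avg, exp_avg_sq, and beta1_pow appear on both sides of the argument list because the op updates them in place. For reference, a plain-Python sketch of the decoupled-weight-decay update an AdamW kernel implements (the textbook Loshchilov & Hutter step with bias correction folded into the step size; lr, beta1, beta2, eps, and weight_decay are assumed hyperparameters, not values read from this diff):

    exp_avg = beta1 * exp_avg + (1 - beta1) * grad               # first moment
    exp_avg_sq = beta2 * exp_avg_sq + (1 - beta2) * grad * grad  # second moment
    beta1_pow, beta2_pow = beta1_pow * beta1, beta2_pow * beta2
    step_size = lr * (1 - beta2_pow) ** 0.5 / (1 - beta1_pow)    # bias correction
    p = p * (1 - lr * weight_decay)                              # decoupled decay
    p = p - step_size * exp_avg / (exp_avg_sq ** 0.5 + eps)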
plsc/optimizer/momentum.py: 5 changes (3 additions & 2 deletions)
@@ -18,6 +18,7 @@
 
 import math
 import paddle
+from paddle import _legacy_C_ops as _C_ops
 from .optimizer import Optimizer
 from plsc.utils import logger
 
@@ -101,7 +102,7 @@ def step(self):
                 axis = getattr(p, 'axis', None)
                 assert index is not None
                 assert axis is not None
-                _, _, _ = paddle._C_ops.sparse_momentum(
+                _, _, _ = _C_ops.sparse_momentum(
                     p,
                     grad,
                     exp_avg,
@@ -125,7 +126,7 @@
                     'multi_precision',
                     master_param is not None)
             else:
-                _, _, _ = paddle._C_ops.momentum(
+                _, _, _ = _C_ops.momentum(
                     p,
                     grad,
                     exp_avg,
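Both fused calls follow the legacy positional convention visible above: inputs and outputs first, then flat attribute name/value pairs such as 'multi_precision', master_param is not None. The sparse_momentum branch is taken when a parameter carries index/axis metadata (hence the two asserts); the dense branch runs the standard heavy-ball update. A reference sketch of the plain (non-Nesterov) momentum step, with mu as the momentum coefficient (the attribute name used by Paddle's momentum op):

    velocity = mu * velocity + grad   # exp_avg plays the role of velocity above
    p = p - lr * velocity             # parameter update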
