From 603ea2aeb8fc1f313e00188352e4d0ac12ba2694 Mon Sep 17 00:00:00 2001 From: Guoxia Wang Date: Mon, 19 Dec 2022 17:26:36 +0800 Subject: [PATCH] Fix the problem caused by removing fluid.layers.l2_normalize (#162) --- plsc/models/layers/partialfc.py | 5 +++-- plsc/nn/norm.py | 40 +++++++++++++++++++++++++++++++++ 2 files changed, 43 insertions(+), 2 deletions(-) create mode 100644 plsc/nn/norm.py diff --git a/plsc/models/layers/partialfc.py b/plsc/models/layers/partialfc.py index d44ec87199626..e8a1589ebcd22 100644 --- a/plsc/models/layers/partialfc.py +++ b/plsc/models/layers/partialfc.py @@ -21,6 +21,7 @@ from paddle.fluid.framework import EagerParamBase from plsc.utils import logger +from plsc.nn.norm import l2_normalize def _all_gather(tensor, group=None): @@ -225,8 +226,8 @@ def sparse_grad_hook_fn(): else: self.sub_weight = self.weight - norm_feature = paddle.fluid.layers.l2_normalize(total_feature, axis=1) - norm_weight = paddle.fluid.layers.l2_normalize(self.sub_weight, axis=1) + norm_feature = l2_normalize(total_feature, axis=1) + norm_weight = l2_normalize(self.sub_weight, axis=1) local_logit = paddle.matmul( norm_feature, norm_weight, transpose_y=True) diff --git a/plsc/nn/norm.py b/plsc/nn/norm.py new file mode 100644 index 0000000000000..7a152f4e563f6 --- /dev/null +++ b/plsc/nn/norm.py @@ -0,0 +1,40 @@ +# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
from paddle import _C_ops


def l2_normalize(x, axis, epsilon=1e-12, name=None):
    r"""
    Normalize ``x`` along dimension ``axis`` using an L2 norm.

    Drop-in replacement for the removed ``paddle.fluid.layers.l2_normalize``.
    For a 1-D tensor (``axis`` is forced to 0), this computes

    .. math::

        y = \frac{x}{\sqrt{\sum{x^2} + epsilon}}

    For ``x`` with more dimensions, each 1-D slice along dimension ``axis``
    is normalized independently.

    Args:
        x (Tensor): N-D input tensor; float16, float32 or float64.
        axis (int): The axis on which to apply normalization. If
            ``axis < 0``, the axis used is ``rank(x) + axis``; ``-1`` is
            the last dimension.
        epsilon (float): Value added under the square root to avoid
            division by zero. Default: 1e-12.
        name (str, optional): Normally no need to set; see
            :ref:`api_guide_Name`. Default: None.

    Returns:
        Tensor: Output with the same shape and data type as ``x``.
    """
    # A 1-D tensor has only one valid normalization axis.
    if len(x.shape) == 1:
        axis = 0
    # NOTE(review): relies on the private _C_ops API (the public fluid op
    # was removed); this may break across Paddle versions — confirm against
    # the installed Paddle release. _C_ops.norm returns (normalized, norm);
    # only the normalized tensor is needed.
    out, _ = _C_ops.norm(x, 1 if axis is None else axis, epsilon, False)
    return out