diff --git a/paddle/fluid/operators/collective/c_softmax_with_cross_entropy_op_xpu.cc b/paddle/fluid/operators/collective/c_softmax_with_cross_entropy_op_xpu.cc
index 695c28d77554a..fa17b99e79f2a 100644
--- a/paddle/fluid/operators/collective/c_softmax_with_cross_entropy_op_xpu.cc
+++ b/paddle/fluid/operators/collective/c_softmax_with_cross_entropy_op_xpu.cc
@@ -131,13 +131,6 @@ struct CSoftmaxWithCrossEntropyProcessGroupFunctor {
       };
       phi::XPUElementwise<T, XPUType>(
           dev_ctx, logits_2d, logits_max, axis, &softmax_2d, f);
-      ret = xpu::clip(dev_ctx.x_context(),
-                      reinterpret_cast<const XPUType*>(softmax_2d.data()),
-                      reinterpret_cast<XPUType*>(softmax_2d.data()),
-                      N * D,
-                      -64.,
-                      0.);
-      PADDLE_ENFORCE_XDNN_SUCCESS(ret, "clip");
     }
 
     // step 3, obtain predict target
@@ -335,13 +328,6 @@ struct CSoftmaxWithCrossEntropyFunctor {
       };
       phi::XPUElementwise<T, XPUType>(
           dev_ctx, logits_2d, logits_max, axis, &softmax_2d, f);
-      ret = xpu::clip(dev_ctx.x_context(),
-                      reinterpret_cast<const XPUType*>(softmax_2d.data()),
-                      reinterpret_cast<XPUType*>(softmax_2d.data()),
-                      N * D,
-                      -64.,
-                      0.);
-      PADDLE_ENFORCE_XDNN_SUCCESS(ret, "clip");
     }
 
     // step 3, obtain predict target
diff --git a/test/xpu/collective_softmax_with_cross_entropy_op_xpu.py b/test/xpu/collective_softmax_with_cross_entropy_op_xpu.py
index e450811479412..abb94cc6ad15b 100644
--- a/test/xpu/collective_softmax_with_cross_entropy_op_xpu.py
+++ b/test/xpu/collective_softmax_with_cross_entropy_op_xpu.py
@@ -133,7 +133,7 @@ def run_trainer(self, args):
         # each xpu uses own half of logits
         np.random.seed(os.getpid())
         logits = np.random.uniform(
-            low=-10.0, high=10.0, size=(self.batch_size, self.local_elements)
+            low=-40.0, high=40.0, size=(self.batch_size, self.local_elements)
         ).astype(np_data_type)
         out = exe.run(
             train_prog,
diff --git a/test/xpu/test_collective_softmax_with_cross_entropy_xpu.py b/test/xpu/test_collective_softmax_with_cross_entropy_xpu.py
index 0bc75c7a4930b..9346f004f83ad 100644
--- a/test/xpu/test_collective_softmax_with_cross_entropy_xpu.py
+++ b/test/xpu/test_collective_softmax_with_cross_entropy_xpu.py
@@ -33,7 +33,7 @@ def stable_softmax(x):
     """Compute the softmax of vector x in a numerically stable way."""
     # clip to shiftx, otherwise, when calc loss with
     # log(exp(shiftx)), may get log(0)=INF
-    shiftx = (x - np.max(x)).clip(-64.0)
+    shiftx = x - np.max(x)
     exps = np.exp(shiftx)
     return exps / np.sum(exps)
 
@@ -131,13 +131,13 @@ def check_with_place(
         # get input data for rank 0
         np.random.seed(pid0)
         input0 = np.random.uniform(
-            low=-10.0, high=10.0, size=(self.batch_size, local_elements)
+            low=-40.0, high=40.0, size=(self.batch_size, local_elements)
         ).astype(np_data_type)
 
         # get input data for rank 1
         np.random.seed(pid1)
         input1 = np.random.uniform(
-            low=-10.0, high=10.0, size=(self.batch_size, local_elements)
+            low=-40.0, high=40.0, size=(self.batch_size, local_elements)
         ).astype(np_data_type)
 
         # get combined input data
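For reference, the numpy helper the test relies on after this change is the plain max-shifted softmax: subtracting `np.max(x)` keeps every exponent at or below zero, so `np.exp` cannot overflow, while the old `.clip(-64.0)` silently inflates the smallest probabilities whenever the logits span more than 64, which is presumably why both the XPU kernel and the reference drop it once the test inputs widen to uniform(-40, 40). The sketch below is a standalone illustration, not part of the diff: `stable_softmax` mirrors the updated test helper, while the `clipped_softmax` name and the sample logits are made up for comparison.

```python
import numpy as np


def stable_softmax(x):
    """Max-shifted softmax: exponents are <= 0, so np.exp cannot overflow."""
    shiftx = x - np.max(x)
    exps = np.exp(shiftx)
    return exps / np.sum(exps)


def clipped_softmax(x):
    """Old reference behavior for comparison: shifted logits clipped at -64."""
    shiftx = (x - np.max(x)).clip(-64.0)
    exps = np.exp(shiftx)
    return exps / np.sum(exps)


# Made-up logits spanning more than 64 units, similar in spread to the
# widened uniform(-40, 40) test inputs once both ranks are combined.
x = np.array([-40.0, -30.0, 35.0, 40.0], dtype=np.float32)

print(stable_softmax(x))   # smallest entry ~exp(-80): tiny but finite in float32
print(clipped_softmax(x))  # clipping lifts the smallest entries up to ~exp(-64)
```

Comparing the two outputs shows they agree on the dominant probabilities but diverge on the tail entries, which is exactly where a clipped reference and an unclipped kernel (or vice versa) would disagree in the cross-entropy check.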