Replace paddle/fluid/platform/bfloat16.h with paddle/phi/common/bfloat16.h #63426

Merged (2 commits, Apr 12, 2024)
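The change is mechanical across every touched file: includes of paddle/fluid/platform/bfloat16.h (and float16.h) move to their paddle/phi/common/ counterparts, and the error factories inside PADDLE_ENFORCE_* calls move from the platform::errors namespace to phi::errors. A condensed before/after sketch of the pattern, using names taken from the first activation_op.h hunk below:

    // Before: error helpers spelled through paddle::platform.
    PADDLE_ENFORCE_NOT_NULL(x_var,
                            platform::errors::NotFound(
                                "Cannot get input Variable X, variable name = %s",
                                context.InputName("X")));

    // After: the same check reported through phi.
    PADDLE_ENFORCE_NOT_NULL(
        x_var,
        phi::errors::NotFound("Cannot get input Variable X, variable name = %s",
                              context.InputName("X")));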
2 changes: 1 addition & 1 deletion paddle/fluid/operators/activation_op.cu.h
@@ -13,9 +13,9 @@ limitations under the License. */

 #include "paddle/fluid/operators/activation_op.h"
 #include "paddle/fluid/operators/elementwise/elementwise_op_impl.cu.h"
-#include "paddle/fluid/platform/bfloat16.h"
 #include "paddle/phi/backends/gpu/gpu_device_function.h"
 #include "paddle/phi/common/amp_type_traits.h"
+#include "paddle/phi/common/bfloat16.h"
 #include "paddle/phi/kernels/funcs/activation_functor.h"

 namespace paddle {
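For reference, the relocated header is where the bfloat16 type itself lives; a minimal hypothetical usage sketch (the phi::dtype::bfloat16 spelling and its float conversions are assumptions, not shown in this diff):

    #include "paddle/phi/common/bfloat16.h"

    // Round-trip a value through bfloat16 (drops low mantissa bits).
    phi::dtype::bfloat16 b(1.5f);      // assumed constructor from float
    float back = static_cast<float>(b);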
62 changes: 29 additions & 33 deletions paddle/fluid/operators/activation_op.h
@@ -30,7 +30,7 @@ limitations under the License. */
 #include "paddle/fluid/framework/op_registry.h"
 #include "paddle/fluid/framework/tensor_util.h"
 #include "paddle/fluid/platform/enforce.h"
-#include "paddle/fluid/platform/float16.h"
+#include "paddle/phi/common/float16.h"
 #include "paddle/phi/kernels/funcs/blas/blas.h"

 #include "paddle/phi/kernels/funcs/activation_functor.h"
@@ -53,13 +53,13 @@ inline void ExtractActivationTensor(const framework::ExecutionContext& context,
                                     phi::DenseTensor** Out) {
   auto x_var = context.InputVar("X");
   auto out_var = context.OutputVar("Out");
-  PADDLE_ENFORCE_NOT_NULL(x_var,
-                          platform::errors::NotFound(
-                              "Cannot get input Variable X, variable name = %s",
-                              context.InputName("X")));
+  PADDLE_ENFORCE_NOT_NULL(
+      x_var,
+      phi::errors::NotFound("Cannot get input Variable X, variable name = %s",
+                            context.InputName("X")));
   PADDLE_ENFORCE_NOT_NULL(
       out_var,
-      platform::errors::NotFound(
+      phi::errors::NotFound(
           "Cannot get output Variable Out, variable name = %s",
           context.OutputName("Out")));
   if (CanBeUsedBySelectedRows.count(context.Type())) {
@@ -73,9 +73,9 @@ inline void ExtractActivationTensor(const framework::ExecutionContext& context,

   PADDLE_ENFORCE_NOT_NULL(
       *Out,
-      platform::errors::NotFound("Cannot get the tensor from the Variable "
-                                 "Output(Out), variable name = %s",
-                                 context.OutputName("Out")));
+      phi::errors::NotFound("Cannot get the tensor from the Variable "
+                            "Output(Out), variable name = %s",
+                            context.OutputName("Out")));
 }

 template <ActBwdOpFwdDeps kDepValue>
@@ -94,23 +94,21 @@ inline void ExtractActivationGradTensor(
     out_var = context.InputVar("Out");
     PADDLE_ENFORCE_NOT_NULL(
         out_var,
-        platform::errors::NotFound(
+        phi::errors::NotFound(
            "Cannot get input Variable Out, variable name = %s",
            context.InputName("Out")));
   }

   PADDLE_ENFORCE_NOT_NULL(
       out_grad_var,
-      platform::errors::NotFound(
-          "Cannot get input Variable %s, variable name = %s",
-          framework::GradVarName("Out"),
-          context.InputName(framework::GradVarName("Out"))));
+      phi::errors::NotFound("Cannot get input Variable %s, variable name = %s",
+                            framework::GradVarName("Out"),
+                            context.InputName(framework::GradVarName("Out"))));
   PADDLE_ENFORCE_NOT_NULL(
       x_grad_var,
-      platform::errors::NotFound(
-          "Cannot get output Variable %s, variable name = %s",
-          framework::GradVarName("X"),
-          context.OutputName(framework::GradVarName("X"))));
+      phi::errors::NotFound("Cannot get output Variable %s, variable name = %s",
+                            framework::GradVarName("X"),
+                            context.OutputName(framework::GradVarName("X"))));

   if (CanBeUsedBySelectedRows.count(context.Type())) {
     *dOut = paddle::framework::GetLoDTensorOrSelectedRowsValueFromVar(
@@ -137,19 +135,19 @@ inline void ExtractActivationGradTensor(
     }
   }

-  PADDLE_ENFORCE_NOT_NULL(*dX,
-                          platform::errors::NotFound(
-                              "Cannot get the tensor from the Variable "
-                              "Output(Out), variable name = %s",
-                              context.OutputName(framework::GradVarName("X"))));
+  PADDLE_ENFORCE_NOT_NULL(
+      *dX,
+      phi::errors::NotFound("Cannot get the tensor from the Variable "
+                            "Output(Out), variable name = %s",
+                            context.OutputName(framework::GradVarName("X"))));

   if (static_cast<int>(kDepValue) & static_cast<int>(ActBwdOpFwdDeps::kDepX)) {
     auto x_var = context.InputVar("X");
     PADDLE_ENFORCE_NOT_NULL(
         x_var,
-        platform::errors::NotFound("Cannot get the tensor from the "
-                                   "Variable Input(X), variable name = %s",
-                                   context.InputName("X")));
+        phi::errors::NotFound("Cannot get the tensor from the "
+                              "Variable Input(X), variable name = %s",
+                              context.InputName("X")));
     if (CanBeUsedBySelectedRows.count(context.Type())) {
       *X = paddle::framework::GetLoDTensorOrSelectedRowsValueFromVar(*x_var);
     } else {
@@ -384,26 +382,24 @@ inline void ExtractDoubleGradTensorWithInputDOut(
   auto ddo_var = ctx.OutputVar("DDOut");
   PADDLE_ENFORCE_NOT_NULL(
       ddx_var,
-      platform::errors::NotFound(
-          "Cannot get input Variable Out, variable name = %s",
-          ctx.InputName("DDX")));
+      phi::errors::NotFound("Cannot get input Variable Out, variable name = %s",
+                            ctx.InputName("DDX")));
   *ddX = ctx.Input<phi::DenseTensor>("DDX");
   if (ddo_var) {
     *ddOut = ctx.Output<phi::DenseTensor>("DDOut");
   }
   PADDLE_ENFORCE_NOT_NULL(
       ddX,
-      platform::errors::NotFound(
+      phi::errors::NotFound(
           "Cannot get the tensor from the Variable DDX, variable name = %s",
           ctx.OutputName("DDX")));

   // extract x(input), dx(output)
   auto x_var = ctx.InputVar("X");
   PADDLE_ENFORCE_NOT_NULL(
       x_var,
-      platform::errors::NotFound(
-          "Cannot get input Variable Out, variable name = %s",
-          ctx.InputName("X")));
+      phi::errors::NotFound("Cannot get input Variable Out, variable name = %s",
+                            ctx.InputName("X")));
   auto dx_var = ctx.OutputVar("DX");
   *X = ctx.Input<phi::DenseTensor>("X");
   if (dx_var) {
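Note that several hunks in this file reflow whole argument lists: the substitution is a single identifier, but the formatter re-wraps the call around the shorter phi::errors name, so the removed and added blocks differ in shape. Stripped of that noise, the gradient-extraction check now reads (condensed from the added lines above):

    PADDLE_ENFORCE_NOT_NULL(
        out_grad_var,
        phi::errors::NotFound("Cannot get input Variable %s, variable name = %s",
                              framework::GradVarName("Out"),
                              context.InputName(framework::GradVarName("Out"))));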
4 changes: 2 additions & 2 deletions paddle/fluid/operators/add_position_encoding_op.cc
@@ -72,7 +72,7 @@ class AddPositionEncodingOpMaker : public framework::OpProtoAndCheckerMaker {
           PADDLE_ENFORCE_GE(
               alpha,
               0.0f,
-              platform::errors::InvalidArgument(
+              phi::errors::InvalidArgument(
                   "Attribute 'alpha' must be greater than or equal to 0.0."));
         });
     AddAttr<float>("beta", "The scale of Position Embedding.")
@@ -81,7 +81,7 @@ class AddPositionEncodingOpMaker : public framework::OpProtoAndCheckerMaker {
           PADDLE_ENFORCE_GE(
               beta,
               0.0f,
-              platform::errors::InvalidArgument(
+              phi::errors::InvalidArgument(
                   "Attribute 'beta' must be greater than or equal to 0.0."));
         });
     AddComment(R"DOC(
20 changes: 10 additions & 10 deletions paddle/fluid/operators/add_position_encoding_op.h
@@ -41,7 +41,7 @@ class AddPositionEncodingKernel : public framework::OpKernel<T> {
     if (x_lod.empty()) {
       PADDLE_ENFORCE_EQ(x_dim.size(),
                         3,
-                        platform::errors::InvalidArgument(
+                        phi::errors::InvalidArgument(
                             "The input(X)'s dimension of AddPositionEncodingOp "
                             "should be equal to "
                             "3, but received %d. ",
@@ -52,14 +52,14 @@ class AddPositionEncodingKernel : public framework::OpKernel<T> {
     } else {
       PADDLE_ENFORCE_EQ(x_dim.size(),
                         2,
-                        platform::errors::InvalidArgument(
+                        phi::errors::InvalidArgument(
                             "The input(X)'s dimension of AddPositionEncodingOp "
                             "should be equal to "
                             "2, but received %d. ",
                             x_dim.size()));
       PADDLE_ENFORCE_EQ(x_lod.size(),
                         1,
-                        platform::errors::InvalidArgument(
+                        phi::errors::InvalidArgument(
                             "The input(X)'s lod level of AddPositionEncodingOp "
                             "should be equal to "
                             "1, but received %d. ",
@@ -70,13 +70,13 @@ class AddPositionEncodingKernel : public framework::OpKernel<T> {
       enc_size = x_dim[1];
     }

-    PADDLE_ENFORCE_EQ(enc_size % 2,
-                      0,
-                      platform::errors::InvalidArgument(
-                          "The input(X)'s feature size of "
-                          "AddPositionEncodingOp only support even, "
-                          "but received an odd number: %d. ",
-                          enc_size));
+    PADDLE_ENFORCE_EQ(
+        enc_size % 2,
+        0,
+        phi::errors::InvalidArgument("The input(X)'s feature size of "
+                                     "AddPositionEncodingOp only support even, "
+                                     "but received an odd number: %d. ",
+                                     enc_size));

     const int half_size = enc_size / 2;
     for (int i = 0; i < batch_size; ++i) {
8 changes: 4 additions & 4 deletions paddle/fluid/operators/affine_channel_op.cc
@@ -80,21 +80,21 @@ class AffineChannelOp : public framework::OperatorWithKernel {
     PADDLE_ENFORCE_EQ(
         scale_dims.size(),
         1UL,
-        platform::errors::InvalidArgument(
+        phi::errors::InvalidArgument(
            "The dimensions of Input(Scale) must be 1,"
            "But received the dimensions of Input(Scale) is [%d] ",
            scale_dims.size()));
     PADDLE_ENFORCE_EQ(b_dims.size(),
                       1UL,
-                      platform::errors::InvalidArgument(
+                      phi::errors::InvalidArgument(
                           "The dimensions of Input(Bias) must be 1,"
                           "But received the dimensions of Input(Bias) is [%d] ",
                           scale_dims.size()));
     if (ctx->IsRuntime() || scale_dims[0] > 0) {
       PADDLE_ENFORCE_EQ(
           scale_dims[0],
           C,
-          platform::errors::InvalidArgument(
+          phi::errors::InvalidArgument(
               "The first dimension value of Input(Scale) must be [%d],"
               "But received [%d].",
               C,
@@ -104,7 +104,7 @@ class AffineChannelOp : public framework::OperatorWithKernel {
       PADDLE_ENFORCE_EQ(
           b_dims[0],
           C,
-          platform::errors::InvalidArgument(
+          phi::errors::InvalidArgument(
               "The first dimension value of Input(Bias) must be [%d],"
               "But received [%d].",
               C,
18 changes: 9 additions & 9 deletions paddle/fluid/operators/affine_channel_op_xpu.cc
@@ -70,15 +70,15 @@ class AffineChannelXPUKernel : public framework::OpKernel<T> {
         dev_ctx.x_context(), x_d, scale_d, y_d, x_shape, b_shape);
     PADDLE_ENFORCE_EQ(r,
                       xpu::Error_t::SUCCESS,
-                      platform::errors::External(
+                      phi::errors::External(
                           "The broadcast_mul XPU OP return wrong value[%d %s]",
                           r,
                           XPUAPIErrorMsg[r]));
     r = xpu::broadcast_add(
         dev_ctx.x_context(), y_d, bias_d, y_d, x_shape, b_shape);
     PADDLE_ENFORCE_EQ(r,
                       xpu::Error_t::SUCCESS,
-                      platform::errors::External(
+                      phi::errors::External(
                           "The broadcast_add XPU OP return wrong value[%d %s]",
                           r,
                           XPUAPIErrorMsg[r]));
@@ -140,28 +140,28 @@ class AffineChannelGradXPUKernel : public framework::OpKernel<T> {
         dev_ctx.x_context(), dy_d, dbias_d, x_shape, rdims);
     PADDLE_ENFORCE_EQ(r,
                       xpu::Error_t::SUCCESS,
-                      platform::errors::External(
+                      phi::errors::External(
                           "The reduce_sum XPU OP return wrong value[%d %s]",
                           r,
                           XPUAPIErrorMsg[r]));
     xpu::ctx_guard RAII_GUARD(dev_ctx.x_context());
     T* tmp = RAII_GUARD.alloc_l3_or_gm<T>(dy->numel());
     PADDLE_ENFORCE_NOT_NULL(
-        tmp, platform::errors::External("XPU has no enough memory"));
+        tmp, phi::errors::External("XPU has no enough memory"));

     r = xpu::mul<T>(
         dev_ctx.x_context(), dy_d, x->data<T>(), tmp, dy->numel());
     PADDLE_ENFORCE_EQ(
         r,
         xpu::Error_t::SUCCESS,
-        platform::errors::External("The mul XPU OP return wrong value[%d %s]",
-                                   r,
-                                   XPUAPIErrorMsg[r]));
+        phi::errors::External("The mul XPU OP return wrong value[%d %s]",
+                              r,
+                              XPUAPIErrorMsg[r]));
     r = xpu::reduce_sum<T>(
         dev_ctx.x_context(), tmp, dscale_d, x_shape, rdims);
     PADDLE_ENFORCE_EQ(r,
                       xpu::Error_t::SUCCESS,
-                      platform::errors::External(
+                      phi::errors::External(
                           "The reduce_sum XPU OP return wrong value[%d %s]",
                           r,
                           XPUAPIErrorMsg[r]));
@@ -172,7 +172,7 @@ class AffineChannelGradXPUKernel : public framework::OpKernel<T> {
     PADDLE_ENFORCE_EQ(
         r,
         xpu::Error_t::SUCCESS,
-        platform::errors::External(
+        phi::errors::External(
             "The broadcast_mul XPU OP return wrong value[%d %s]",
             r,
             XPUAPIErrorMsg[r]));
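The XPU kernels keep their call-then-check shape; only the error factory changes. Condensed from the hunks above, every XPU call is followed by the same guard on its integer return code:

    int r = xpu::broadcast_add(
        dev_ctx.x_context(), y_d, bias_d, y_d, x_shape, b_shape);
    PADDLE_ENFORCE_EQ(r,
                      xpu::Error_t::SUCCESS,
                      phi::errors::External(
                          "The broadcast_add XPU OP return wrong value[%d %s]",
                          r,
                          XPUAPIErrorMsg[r]));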
17 changes: 8 additions & 9 deletions paddle/fluid/operators/array_operator.h
@@ -34,16 +34,15 @@ class ArrayOp : public framework::OperatorBase {
   size_t GetOffset(const framework::Scope &scope,
                    const platform::Place &place) const {
     auto *i = scope.FindVar(Input("I"));
-    PADDLE_ENFORCE_NOT_NULL(
-        i, platform::errors::NotFound("Input(I) is not found."));
+    PADDLE_ENFORCE_NOT_NULL(i, phi::errors::NotFound("Input(I) is not found."));
     auto &i_tensor = i->Get<phi::DenseTensor>();
-    PADDLE_ENFORCE_EQ(i_tensor.numel(),
-                      1,
-                      platform::errors::InvalidArgument(
-                          "Input(I) must have numel 1. "
-                          "But received %d, and it's shape is [%s].",
-                          i_tensor.numel(),
-                          i_tensor.dims()));
+    PADDLE_ENFORCE_EQ(
+        i_tensor.numel(),
+        1,
+        phi::errors::InvalidArgument("Input(I) must have numel 1. "
+                                     "But received %d, and it's shape is [%s].",
+                                     i_tensor.numel(),
+                                     i_tensor.dims()));

     // get device context from pool
     platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance();