From be4f61d9cd74f71e91f2cbea7e7248d7a4a2a1df Mon Sep 17 00:00:00 2001 From: lzydev <1528794076@qq.com> Date: Wed, 1 Mar 2023 08:45:37 +0000 Subject: [PATCH 01/12] Add function node in phi_kernel for MKLDNN --- paddle/fluid/framework/data_transform.cc | 19 + paddle/fluid/framework/data_transform.h | 8 + .../new_executor/interpreter/data_transfer.cc | 26 +- paddle/fluid/framework/operator.cc | 20 + paddle/fluid/operators/interpolate_v2_op.cc | 813 ------------------ paddle/phi/api/yaml/backward.yaml | 75 ++ paddle/phi/api/yaml/legacy_backward.yaml | 60 -- paddle/phi/api/yaml/legacy_ops.yaml | 55 -- paddle/phi/api/yaml/op_compat.yaml | 28 + paddle/phi/api/yaml/ops.yaml | 65 ++ paddle/phi/core/infer_varkernel_utils.h | 67 ++ paddle/phi/core/kernel_factory.h | 4 +- .../phi/kernels/onednn/interpolate_kernel.cc | 42 +- paddle/phi/ops/compat/interpolate_sig.cc | 201 ----- 14 files changed, 348 insertions(+), 1135 deletions(-) delete mode 100644 paddle/fluid/operators/interpolate_v2_op.cc create mode 100644 paddle/phi/core/infer_varkernel_utils.h delete mode 100644 paddle/phi/ops/compat/interpolate_sig.cc diff --git a/paddle/fluid/framework/data_transform.cc b/paddle/fluid/framework/data_transform.cc index 38e1ce1c3141f..7f403e607fedd 100644 --- a/paddle/fluid/framework/data_transform.cc +++ b/paddle/fluid/framework/data_transform.cc @@ -156,5 +156,24 @@ void SetTensorToVariable(const Variable &in_var, } } +phi::InferVarKernelContext BuildInferVarKernelContext( + const phi::KernelKey &kernel_key, + const AttributeMap &fluid_attrs, + phi::AttributeMap *phi_attrs, + bool has_infer_varkernel_fn) { + if (has_infer_varkernel_fn) { + for (auto &attr : fluid_attrs) { + switch (attr.second.index()) { + case 3: // string + (*phi_attrs)[attr.first] = paddle::get<3>(attr.second); + break; + default: + break; + } + } + } + return phi::InferVarKernelContext(&kernel_key, &phi_attrs); +} + } // namespace framework } // namespace paddle diff --git a/paddle/fluid/framework/data_transform.h 
b/paddle/fluid/framework/data_transform.h index 004742e2a4479..b113bd4d1118e 100644 --- a/paddle/fluid/framework/data_transform.h +++ b/paddle/fluid/framework/data_transform.h @@ -25,6 +25,7 @@ limitations under the License. */ #include "paddle/fluid/platform/device_context.h" #include "paddle/fluid/platform/macros.h" #include "paddle/phi/common/transform.h" +#include "paddle/phi/core/infer_varkernel_utils.h" #include "paddle/phi/kernels/funcs/math_function.h" namespace paddle { @@ -45,5 +46,12 @@ void TransformData(const phi::KernelKey &expected_kernel_type, void SetTensorToVariable(const Variable &in_var, const phi::DenseTensor &tensor, Variable *out_var); + +phi::InferVarKernelContext BuildInferVarKernelContext( + const phi::KernelKey &kernel_key, + const AttributeMap &fluid_attrs, + phi::AttributeMap *phi_attrs, + bool has_infer_varkernel_fn); + } // namespace framework } // namespace paddle diff --git a/paddle/fluid/framework/new_executor/interpreter/data_transfer.cc b/paddle/fluid/framework/new_executor/interpreter/data_transfer.cc index 9e96e9cb403ac..f57842a8d4e30 100644 --- a/paddle/fluid/framework/new_executor/interpreter/data_transfer.cc +++ b/paddle/fluid/framework/new_executor/interpreter/data_transfer.cc @@ -15,6 +15,7 @@ #include "paddle/fluid/framework/new_executor/interpreter/data_transfer.h" #include "paddle/fluid/framework/convert_utils.h" +#include "paddle/fluid/framework/data_transform.h" #include "paddle/fluid/framework/new_executor/interpreter/interpreter_util.h" #include "paddle/phi/core/kernel_context.h" #include "paddle/phi/core/kernel_factory.h" @@ -470,7 +471,20 @@ void ApplyDataTransform(const OpKernelType& expected_kernel_key, bool transfered = false; DataTranferHelper data_transfer_helper(place, var_scope, local_scope); - + phi::Kernel* phi_kernel = op_func_node->phi_kernel_; + auto has_infer_varkernel_fn = (phi_kernel && phi_kernel->IsValid() && + phi_kernel->GetKernelRegisteredType() == + phi::KernelRegisteredType::FUNCTION && + 
phi_kernel->infer_var_kernel_fn_ != nullptr); + phi::AttributeMap infer_attrs{}; + auto fluid_attrs = + static_cast<framework::OperatorWithKernel*>(op_base)->Attrs(); + phi::InferVarKernelContext infer_varkernel_context = + BuildInferVarKernelContext( + framework::TransOpKernelTypeToPhiKernelKey(expected_kernel_key), + fluid_attrs, + &infer_attrs, + has_infer_varkernel_fn); auto apply_data_transform_for_one_parameter = [&](const std::string& parameter_name, const std::vector<std::string>& argument_names, @@ -551,7 +565,14 @@ void ApplyDataTransform(const OpKernelType& expected_kernel_key, *tensor_in, framework::TransOpKernelTypeToPhiKernelKey( expected_kernel_key)); - + if (has_infer_varkernel_fn) { + infer_varkernel_context.SetVarName( + const_cast<std::string*>(&parameter_name)); + infer_varkernel_context.SetDenseTensor( + const_cast<phi::DenseTensor*>(tensor_in)); + kernel_key_for_var = + phi_kernel->infer_var_kernel_fn_(&infer_varkernel_context); + } std::unique_ptr<phi::KernelKey> expected_kernel_key_for_argument_def = nullptr; if (argument_def && @@ -630,7 +651,6 @@ void ApplyDataTransform(const OpKernelType& expected_kernel_key, } }; - phi::Kernel* phi_kernel = op_func_node->phi_kernel_; if (phi_kernel && phi_kernel->IsValid() && phi_kernel->GetKernelRegisteredType() == phi::KernelRegisteredType::FUNCTION) { diff --git a/paddle/fluid/framework/operator.cc b/paddle/fluid/framework/operator.cc index 747a17bd05e08..0f152569ec568 100644 --- a/paddle/fluid/framework/operator.cc +++ b/paddle/fluid/framework/operator.cc @@ -39,6 +39,7 @@ limitations under the License.
*/ #include "paddle/phi/common/int_array.h" #include "paddle/phi/common/scalar.h" #include "paddle/phi/core/ddim.h" +#include "paddle/phi/core/infer_varkernel_utils.h" #include "paddle/phi/core/kernel_context.h" #include "paddle/phi/core/kernel_factory.h" #include "paddle/phi/ops/compat/signatures.h" @@ -2495,6 +2496,18 @@ Scope* OperatorWithKernel::PrepareData( } } + auto has_infer_varkernel_fn = (run_phi_kernel_ && + phi_kernel_->GetKernelRegisteredType() == + phi::KernelRegisteredType::FUNCTION && + phi_kernel_->infer_var_kernel_fn_ != nullptr); + phi::AttributeMap infer_attrs{}; + auto fluid_attrs = Attrs(); + phi::InferVarKernelContext infer_varkernel_context = + BuildInferVarKernelContext(expected_kernel_key, + fluid_attrs, + &infer_attrs, + has_infer_varkernel_fn); + const auto& name_map = Inputs(); auto prepare_input_data = [&](const std::string& in_name, std::vector<Variable*>* in_vars, @@ -2557,6 +2570,13 @@ Scope* OperatorWithKernel::PrepareData( auto kernel_type_for_var = GetKernelTypeForVar(in_name, *tensor_in, expected_kernel_key); + if (has_infer_varkernel_fn) { + infer_varkernel_context.SetVarName(const_cast<std::string*>(&in_name)); + infer_varkernel_context.SetDenseTensor( + const_cast<phi::DenseTensor*>(tensor_in)); + kernel_type_for_var = + phi_kernel_->infer_var_kernel_fn_(&infer_varkernel_context); + } bool need_trans_dtype = NeedTransformDataType(expected_kernel_key, kernel_type_for_var); bool need_trans_layout = NeedTransformLayout( diff --git a/paddle/fluid/operators/interpolate_v2_op.cc b/paddle/fluid/operators/interpolate_v2_op.cc deleted file mode 100644 index e3c4b0be18693..0000000000000 --- a/paddle/fluid/operators/interpolate_v2_op.cc +++ /dev/null @@ -1,813 +0,0 @@ -/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License.
- You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ - -#include -#include -#include - -#include "paddle/fluid/framework/infershape_utils.h" -#include "paddle/fluid/framework/op_registry.h" -#include "paddle/phi/core/infermeta_utils.h" -#include "paddle/phi/infermeta/multiary.h" - -#ifdef PADDLE_WITH_MKLDNN -#include "paddle/fluid/platform/mkldnn_helper.h" -#endif - -namespace paddle { -namespace operators { - -using DataLayout = phi::DataLayout; - -static void Interpolate1DInferShapeCheck(framework::InferShapeContext* ctx) { - auto dim_x = ctx->GetInputDim("X"); - auto interp_method = ctx->Attrs().Get("interp_method"); - - PADDLE_ENFORCE_EQ("linear", - interp_method, - platform::errors::InvalidArgument( - "Interpolation method can only be \"linear\" when" - "Input(X) dimension is 3, but got method = %s .", - interp_method)); - const DataLayout data_layout = - phi::StringToDataLayout(ctx->Attrs().Get("data_layout")); - for (int i = 0; i < dim_x.size(); ++i) { - PADDLE_ENFORCE_NE(dim_x[i], - 0, - platform::errors::InvalidArgument( - "The shape of input(x) should be larged " - "than 0, bug received shape[%d] is %d ", - i, - dim_x[i])); - } - if (ctx->HasInputs("SizeTensor")) { - // top prority size - auto inputs_name = ctx->Inputs("SizeTensor"); - PADDLE_ENFORCE_EQ( - inputs_name.size(), - 1, - platform::errors::InvalidArgument( - "Input(SizeTensor)'size of Op(interpolate) must be 1. 
" - "Attr(out_shape)'s length must be 1 for 3-D input tensor, but got " - "size = %d .", - inputs_name.size())); - int out_w = ctx->Attrs().Get("out_w"); - framework::DDim dim_out; - if (data_layout == DataLayout::kNCHW) { - dim_out = {dim_x[0], dim_x[1], out_w}; - } else { - dim_out = {dim_x[0], out_w, dim_x[2]}; - } - ctx->SetOutputDim("Out", dim_out); - - return; - } - - int out_w; - if (ctx->HasInput("Scale")) { - auto scale_tensor = ctx->GetInputDim("Scale"); - PADDLE_ENFORCE_EQ( - scale_tensor.size(), - 1, - platform::errors::InvalidArgument( - "Scale's dimension size must be 1, but got dimension = %d .", - scale_tensor.size())); - PADDLE_ENFORCE_EQ( - scale_tensor[0], - 1, - platform::errors::InvalidArgument( - "Scale's shape must be 1, but got shape = %d .", scale_tensor[0])); - out_w = -1; - } else { - auto scale = ctx->Attrs().Get>("scale"); - if (scale.size() > 0) { - float scale_w = -1; - scale_w = scale[0]; - PADDLE_ENFORCE_EQ( - scale_w > 0, - true, - platform::errors::InvalidArgument( - "The scale_w in Attr(scale) of Operator(interpolate) " - "should be greater than 0, but received value is %d.", - scale_w)); - if (scale_w > 0.) { - // round down - out_w = (data_layout == DataLayout::kNCHW - ? static_cast(dim_x[2] * scale_w) - : static_cast(dim_x[1] * scale_w)); - // protect when input shape is -1 - out_w = out_w > 0 ? 
out_w : -1; - } - } else { - out_w = ctx->Attrs().Get("out_w"); - } - } - - if (ctx->HasInput("OutSize") && ctx->IsRuntime()) { - auto out_size_dim = ctx->GetInputDim("OutSize"); - PADDLE_ENFORCE_EQ( - out_size_dim.size(), - 1, - platform::errors::InvalidArgument( - "OutSize's dimension size must be 1, but got dimention = %d .", - out_size_dim.size())); - PADDLE_ENFORCE_EQ( - out_size_dim[0], - 1, - platform::errors::InvalidArgument( - "OutSize's 0-th dimension's value must be 1, but got value = %d .", - out_size_dim[0])); - ctx->ShareLoD("X", "Out"); - return; - } - - framework::DDim dim_out; - if (data_layout == DataLayout::kNCHW) { - dim_out = {dim_x[0], dim_x[1], out_w}; - } else { - dim_out = {dim_x[0], out_w, dim_x[2]}; - } - ctx->SetOutputDim("Out", dim_out); -} - -static void Interpolate2DInferShapeCheck(framework::InferShapeContext* ctx) { - auto dim_x = ctx->GetInputDim("X"); - auto interp_method = ctx->Attrs().Get("interp_method"); - - PADDLE_ENFORCE( - "bilinear" == interp_method || "nearest" == interp_method || - "bicubic" == interp_method, - platform::errors::InvalidArgument( - "Interpolation method can only be \"bilinear\" or \"nearest\" when " - "Input(X) dimension is 4, but got method = %s.", - interp_method)); - const DataLayout data_layout = - phi::StringToDataLayout(ctx->Attrs().Get("data_layout")); - - for (int i = 0; i < dim_x.size(); ++i) { - PADDLE_ENFORCE_NE(dim_x[i], - 0, - platform::errors::InvalidArgument( - "The shape of input(x) should be larged " - "than 0, bug received shape[%d] is %d ", - i, - dim_x[i])); - } - - if (ctx->HasInputs("SizeTensor")) { - // top prority size - auto inputs_name = ctx->Inputs("SizeTensor"); - PADDLE_ENFORCE_EQ( - inputs_name.size(), - 2, - platform::errors::InvalidArgument( - "Input(SizeTensor)'size of Op(interpolate) must be 2. 
" - "Attr(out_shape)'s length must be 2 for 4-D input " - "tensor, but got size = %d .", - inputs_name.size())); - int out_h = ctx->Attrs().Get("out_h"); - int out_w = ctx->Attrs().Get("out_w"); - framework::DDim dim_out; - if (data_layout == DataLayout::kNCHW) { - dim_out = {dim_x[0], dim_x[1], out_h, out_w}; - } else { - dim_out = {dim_x[0], out_h, out_w, dim_x[3]}; - } - ctx->SetOutputDim("Out", dim_out); - - return; - } - - int out_h, out_w; - if (ctx->HasInput("Scale")) { - auto scale_tensor = ctx->GetInputDim("Scale"); - PADDLE_ENFORCE_EQ( - scale_tensor.size(), - 1, - platform::errors::InvalidArgument( - "Scale's dimension size must be 1, but got dimension = %d .", - scale_tensor.size())); - PADDLE_ENFORCE_EQ(scale_tensor[0] == 2 || scale_tensor[0] == 1, - true, - platform::errors::InvalidArgument( - "Scale's shape must be 2 or 1, but got shape = %d .", - scale_tensor[0])); - out_h = -1; - out_w = -1; - } else { - auto scale = ctx->Attrs().Get>("scale"); - if (scale.size() > 0) { - float scale_h = -1; - float scale_w = -1; - scale_h = scale[0]; - scale_w = scale[1]; - PADDLE_ENFORCE_EQ( - scale_w > 0, - true, - platform::errors::InvalidArgument( - "The scale_w in Attr(scale) of Operator(interpolate) " - "should be greater than 0, but received value is %d.", - scale_w)); - PADDLE_ENFORCE_EQ( - scale_h > 0, - true, - platform::errors::InvalidArgument( - "The scale_h in Attr(scale) of Operator(interpolate) " - "should be greater than 0, but received value is %d.", - scale_h)); - if (scale_h > 0. && scale_w > 0.) { - // round down - out_h = (data_layout == DataLayout::kNCHW - ? static_cast(dim_x[2] * scale_h) - : static_cast(dim_x[1] * scale_h)); - out_w = (data_layout == DataLayout::kNCHW - ? static_cast(dim_x[3] * scale_w) - : static_cast(dim_x[2] * scale_w)); - // protect when input shape is -1 - out_h = out_h > 0 ? out_h : -1; - out_w = out_w > 0 ? 
out_w : -1; - } - } else { - out_h = ctx->Attrs().Get("out_h"); - out_w = ctx->Attrs().Get("out_w"); - } - } - - if (ctx->HasInput("OutSize") && ctx->IsRuntime()) { - auto out_size_dim = ctx->GetInputDim("OutSize"); - PADDLE_ENFORCE_EQ( - out_size_dim.size(), - 1, - platform::errors::InvalidArgument( - "OutSize's dimension size must be 1, but got dimension = %d .", - out_size_dim.size())); - PADDLE_ENFORCE_EQ( - out_size_dim[0], - 2, - platform::errors::InvalidArgument( - "OutSize's dim[0] must be 2, but got dimention = %d .", - out_size_dim[0])); - ctx->ShareLoD("X", "Out"); - return; - } - - framework::DDim dim_out; - if (data_layout == DataLayout::kNCHW) { - dim_out = {dim_x[0], dim_x[1], out_h, out_w}; - } else { - dim_out = {dim_x[0], out_h, out_w, dim_x[3]}; - } - ctx->SetOutputDim("Out", dim_out); -} - -static void Interpolate3DInferShapeCheck(framework::InferShapeContext* ctx) { - auto dim_x = ctx->GetInputDim("X"); - auto interp_method = ctx->Attrs().Get("interp_method"); - - PADDLE_ENFORCE("nearest" == interp_method || "trilinear" == interp_method, - platform::errors::InvalidArgument( - "Interpolation method can only be \"trilinear\" or " - "\"nearest\" when Input(X) " - "dimension is 5, but got method = %s .", - interp_method)); - const DataLayout data_layout = - phi::StringToDataLayout(ctx->Attrs().Get("data_layout")); - - for (int i = 0; i < dim_x.size(); ++i) { - PADDLE_ENFORCE_NE(dim_x[i], - 0, - platform::errors::InvalidArgument( - "The shape of input(x) should be larged " - "than 0, bug received shape[%d] is %d ", - i, - dim_x[i])); - } - - if (ctx->HasInputs("SizeTensor")) { - // top prority size - auto inputs_name = ctx->Inputs("SizeTensor"); - PADDLE_ENFORCE_EQ( - inputs_name.size(), - 3, - platform::errors::InvalidArgument( - "Input(SizeTensor)'s size of Op(interpolate) must be 3. 
" - "Attr(out_shape)'s length must be 3 for 5-D input " - "tensor, but got size = %d .", - inputs_name.size())); - int out_d = ctx->Attrs().Get("out_d"); - int out_h = ctx->Attrs().Get("out_h"); - int out_w = ctx->Attrs().Get("out_w"); - framework::DDim dim_out; - if (data_layout == DataLayout::kNCHW) { - dim_out = {dim_x[0], dim_x[1], out_d, out_h, out_w}; - } else { - dim_out = {dim_x[0], out_d, out_h, out_w, dim_x[4]}; - } - ctx->SetOutputDim("Out", dim_out); - - return; - } - - int out_d, out_h, out_w; - if (ctx->HasInput("Scale")) { - auto scale_tensor = ctx->GetInputDim("Scale"); - PADDLE_ENFORCE_EQ( - scale_tensor.size(), - 1, - platform::errors::InvalidArgument( - "Scale's dimension size must be 1, but got size = %d .", - scale_tensor.size())); - PADDLE_ENFORCE_EQ(scale_tensor[0] == 3 || scale_tensor[0] == 1, - true, - platform::errors::InvalidArgument( - "Scale's shape must be 3 or 1, but got shape = %d .", - scale_tensor[0])); - out_d = -1; - out_h = -1; - out_w = -1; - } else { - auto scale = ctx->Attrs().Get>("scale"); - if (scale.size() > 0) { - float scale_d = -1; - float scale_h = -1; - float scale_w = -1; - scale_d = scale[0]; - scale_h = scale[1]; - scale_w = scale[2]; - PADDLE_ENFORCE_EQ( - scale_w > 0, - true, - platform::errors::InvalidArgument( - "The scale_w in Attr(scale) of Operator(interpolate) " - "should be greater than 0, but received value is %d.", - scale_w)); - PADDLE_ENFORCE_EQ( - scale_h > 0, - true, - platform::errors::InvalidArgument( - "The scale_h in Attr(scale) of Operator(interpolate) " - "should be greater than 0, but received value is %d.", - scale_h)); - PADDLE_ENFORCE_EQ( - scale_d > 0, - true, - platform::errors::InvalidArgument( - "The scale_d in Attr(scale) of Operator(interpolate) " - "should be greater than 0, but received value is %d.", - scale_d)); - if (scale_d > 0. && scale_h > 0. && scale_w > 0.) { - // round down - out_d = (data_layout == DataLayout::kNCHW - ? 
static_cast(dim_x[2] * scale_d) - : static_cast(dim_x[1] * scale_d)); - out_h = (data_layout == DataLayout::kNCHW - ? static_cast(dim_x[3] * scale_h) - : static_cast(dim_x[2] * scale_h)); - out_w = (data_layout == DataLayout::kNCHW - ? static_cast(dim_x[4] * scale_w) - : static_cast(dim_x[3] * scale_w)); - // protect when input shape is -1 - out_d = out_d > 0 ? out_d : -1; - out_h = out_h > 0 ? out_h : -1; - out_w = out_w > 0 ? out_w : -1; - } - } else { - out_d = ctx->Attrs().Get("out_d"); - out_h = ctx->Attrs().Get("out_h"); - out_w = ctx->Attrs().Get("out_w"); - } - } - - if (ctx->HasInput("OutSize") && ctx->IsRuntime()) { - auto out_size_dim = ctx->GetInputDim("OutSize"); - PADDLE_ENFORCE_EQ( - out_size_dim.size(), - 1, - platform::errors::InvalidArgument( - "OutSize's dimension size must be 1, but got size is %d.", - out_size_dim.size())); - PADDLE_ENFORCE_EQ(out_size_dim[0], - 3, - platform::errors::InvalidArgument( - "OutSize's dim[0] must be 3, but got size is %d.", - out_size_dim[0])); - ctx->ShareLoD("X", "Out"); - return; - } - - framework::DDim dim_out; - if (data_layout == DataLayout::kNCHW) { - dim_out = {dim_x[0], dim_x[1], out_d, out_h, out_w}; - } else { - dim_out = {dim_x[0], out_d, out_h, out_w, dim_x[4]}; - } - ctx->SetOutputDim("Out", dim_out); -} - -class InterpolateV2Op : public framework::OperatorWithKernel { - public: - using framework::OperatorWithKernel::OperatorWithKernel; - - protected: - void InferShape(framework::InferShapeContext* ctx) const override { - OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "Interpolate"); - OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "Interpolate"); - - auto dim_x = ctx->GetInputDim("X"); // NCHW format - PADDLE_ENFORCE( - dim_x.size() == 3 || dim_x.size() == 4 || dim_x.size() == 5, - platform::errors::Unimplemented( - "Input(X) dimension must be 3, 4 or 5, but got dimension = %d .", - dim_x.size())); - - if (dim_x.size() == 3) { - // shape check for 1D interpolate for input tensor shape NCHW 
- Interpolate1DInferShapeCheck(ctx); - } else if (dim_x.size() == 4) { - // shape check for 2D interpolate for input tensor shape NCHW - Interpolate2DInferShapeCheck(ctx); - } else { // dim_x.size() == 5 - // shape check for 3D interpolate for input tensor shape NCDHW - Interpolate3DInferShapeCheck(ctx); - } - } - - protected: - phi::KernelKey GetExpectedKernelType( - const framework::ExecutionContext& ctx) const override { - auto data_type = OperatorWithKernel::IndicateVarDataType(ctx, "X"); - return phi::KernelKey(data_type, ctx.GetPlace()); - } - - phi::KernelKey GetKernelTypeForVar( - const std::string& var_name, - const phi::DenseTensor& tensor, - const phi::KernelKey& expected_kernel_type) const override { -#ifdef PADDLE_WITH_MKLDNN - if ((expected_kernel_type.layout() == phi::DataLayout::ONEDNN) && - (tensor.layout() != phi::DataLayout::ONEDNN)) { - auto attrs = Attrs(); - auto ar = paddle::framework::AttrReader(attrs); - const std::string data_format = ar.Get("data_layout"); - auto dl = phi::StringToDataLayout(data_format); - // Some models may have intentionally set "AnyLayout" for pool - // op. Treat this as NCHW (default data_format value) - if (dl != phi::DataLayout::kAnyLayout) { - return phi::KernelKey(tensor.place(), dl, expected_kernel_type.dtype()); - } - } -#endif - - if (var_name == "OutSize" || var_name == "SizeTensor" || - var_name == "Scale") { - return phi::KernelKey(phi::Backend::ALL_BACKEND, - expected_kernel_type.layout(), - expected_kernel_type.dtype()); - } - return phi::KernelKey( - tensor.place(), tensor.layout(), expected_kernel_type.dtype()); - } -}; - -class InterpolateV2OpMaker : public framework::OpProtoAndCheckerMaker { - public: - void Make() override { - AddInput("X", - "The input tensor of interpolate operator, " - "This is a 4-D tensor with shape of [N, C, H, W] or a " - "5-D tensor with shape of [N, C, D, H, W]."); - AddInput("OutSize", - "This is a 1-D tensor with two numbers to specify output size. 
" - "It should be [output_height, output_width] when input is a 4-D " - "tensor and should be [output_depth, output_height, output_width] " - "when input is a 5-D tensor. It has a higher priority than " - "the attr(out_d), attr(out_h), attr(out_w) and attr(scale).") - .AsDispensable(); - AddInput("SizeTensor", - "(vector>, optional). If provided, interpolate will " - "use this. The shape of the tensor in vector MUST BE [1]. " - "It has the highest priority compare with Input(OutSize) and " - "attr(out_d), attr(out_h), attr(out_w) and attr(scale).") - .AsDuplicable() - .AsDispensable(); - AddInput("Scale", - "This is a 1-D tensor with one number to specify output scale. " - "It has the higher priority compare with attr(scale).") - .AsDispensable(); - AddOutput("Out", - "The output tensor of interpolate operator, " - "This is a tensor in same rank with Input(X)."); - - AddAttr( - "data_layout", - "(string, default NCHW) Only used in " - "an optional string from: \"NHWC\", \"NCHW\". " - "Specify that the data format of the input and output data is " - "channel_first or channel_last.") - .SetDefault("NCHW"); - AddAttr("out_d", "output depth of interpolate op.").SetDefault(0); - AddAttr("out_h", "output height of interpolate op.").SetDefault(0); - AddAttr("out_w", "output width of interpolate op.").SetDefault(0); - AddAttr>("scale", "scale_d factor of interpolate op.") - .SetDefault(std::vector{}); - AddAttr("interp_method", - "(string, default \"bilinear\"), interpolation " - "method, can be \"linear\" for linear interpolation" - ",\"bilinear\" for " - "bilinear interpolation, \"trilinear\" for trilinear " - "interpolation and \"nearest\" for nearest " - "neighbor interpolation, and \"bicubic\" for bicubic" - "interpolation.") - .SetDefault("bilinear"); - AddAttr( - "align_corners", - "an optional bool. Defaults to True. 
" - "If True, the centers of 4 corner pixels of the input and output " - "tensors are aligned, preserving the values at the corner pixels, " - "If False, are not aligned") - .SetDefault(true); - AddAttr("align_mode", - "(int, default \'1\'), optional for bilinear interpolation, " - "can be \'0\' for src_idx = scale*(dst_indx+0.5)-0.5 , " - "can be \'1\' for src_idx = scale*dst_index .") - .SetDefault(1); - AddComment(R"DOC( - This operator samples input X to given output shape by using specified - interpolation method, the interpolation methods can be \"nearest\" - for nearest neighbor interpolation and \"bilinear\" for bilinear - interpolation and \"linear\" for linear interpolation.. - - Nearest neighbor interpolation is to perform nearest neighbor interpolation - in both the 3rd dimension(in height direction) and the 4th dimension(in width - direction) on input tensor. - - Linear interpolation is the method of using a line connecting two known quantities - to determine the value of an unknown quantity between the two known quantities. - - Bilinear interpolation is an extension of linear interpolation for - interpolating functions of two variables (e.g. H-direction and - W-direction in this op) on a rectilinear 2D grid. The key idea is - to perform linear interpolation first in one direction, and then - again in the other direction. - - Trilinear interpolation is an extension of linear interpolation for - interpolating functions of three variables (e.g. D-direction, - H-direction and W-direction in this op) on a rectilinear 3D grid. - The linear interpolation is performed on three directions. - - Bicubic interpolation is an extension of cubic interpolation for interpolating - data points on a two-dimensional regular grid. The interpolated surface is - smoother than corresponding surfaces obtained by bilinear interpolation or - nearest-neighbor interpolation. 
- - Align_corners and align_mode are optional parameters,the calculation method - of interpolation can be selected by them. - - Example: - - For scale: - - if align_corners = True and out_{size}>1 : - - scale_{factor} = (in_{size}-1.0)/(out_{size}-1.0) - - else: - - scale_{factor} = float(in_{size}/out_{size}) - - - Nearest neighbor interpolation: - - if: - align_corners = False - - input : (N,C,H_in,W_in) - output: (N,C,H_out,W_out) where: - - H_out = \left \lfloor {H_{in} * scale_{}factor}} \right \rfloor - W_out = \left \lfloor {W_{in} * scale_{}factor}} \right \rfloor - - else: - align_corners = True - - input : (N,C,H_in,W_in) - output: (N,C,H_out,W_out) where: - - H_out = round(H_{in} * scale_{factor}) - W_out = round(W_{in} * scale_{factor}) - - Bilinear interpolation: - - if: - align_corners = False , align_mode = 0 - - input : (N,C,H_in,W_in) - output: (N,C,H_out,W_out) where: - - H_out = (H_{in}+0.5) * scale_{factor} - 0.5 - W_out = (W_{in}+0.5) * scale_{factor} - 0.5 - - - else: - - input : (N,C,H_in,W_in) - output: (N,C,H_out,W_out) where: - - H_out = H_{in} * scale_{factor} - W_out = W_{in} * scale_{factor} - - Trilinear interpolation: - - if: - align_corners = False , align_mode = 0 - - input : (N,C,D_in,H_in,W_in) - output: (N,C,D_out,H_out,W_out) where: - - D_out = (D_{in}+0.5) * scale_{factor} - 0.5 - H_out = (H_{in}+0.5) * scale_{factor} - 0.5 - W_out = (W_{in}+0.5) * scale_{factor} - 0.5 - - - else: - - input : (N,C,D_in,H_in,W_in) - output: (N,C,D_out,H_out,W_out) where: - - D_out = D_{in} * scale_{factor} - H_out = H_{in} * scale_{factor} - W_out = W_{in} * scale_{factor} - - Bicubic interpolation: - - if: - align_corners = False - input : (N,C,H_in,W_in) - output: (N,C,H_out,W_out) where: - H_out = (H_{in}+0.5) * scale_{factor} - 0.5 - W_out = (W_{in}+0.5) * scale_{factor} - 0.5 - else: - input : (N,C,H_in,W_in) - output: (N,C,H_out,W_out) where: - H_out = H_{in} * scale_{factor} - W_out = W_{in} * scale_{factor} - - For details of nearest 
neighbor interpolation, please refer to Wikipedia: - https://en.wikipedia.org/wiki/Nearest-neighbor_interpolation - - For details of bilinear interpolation, please refer to Wikipedia: - https://en.wikipedia.org/wiki/Bilinear_interp_v2olation - - For details of trilinear interpolation, please refer to Wikipedia: - https://en.wikipedia.org/wiki/Trilinear_interp_v2olation - - For details of bicubic interpolation, please refer to Wikipedia: - https://en.wikipedia.org/wiki/Bicubic_interpolation - )DOC"); - } -}; - -class InterpolateV2OpGrad : public framework::OperatorWithKernel { - public: - using framework::OperatorWithKernel::OperatorWithKernel; - - protected: - void InferShape(framework::InferShapeContext* ctx) const override { - OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "InterpolateGrad"); - OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Out")), - "Input", - "Out@GRAD", - "InterpolateGrad"); - - auto dim_x = ctx->GetInputDim("X"); - if (ctx->HasOutput(framework::GradVarName("X"))) { - ctx->SetOutputDim(framework::GradVarName("X"), dim_x); - } - } - - phi::KernelKey GetExpectedKernelType( - const framework::ExecutionContext& ctx) const override { - return phi::KernelKey(OperatorWithKernel::IndicateVarDataType( - ctx, framework::GradVarName("Out")), - ctx.GetPlace()); - } - - phi::KernelKey GetKernelTypeForVar( - const std::string& var_name, - const phi::DenseTensor& tensor, - const phi::KernelKey& expected_kernel_type) const override { - if (var_name == "OutSize" || var_name == "SizeTensor" || - var_name == "Scale") { - return phi::KernelKey(phi::Backend::ALL_BACKEND, - expected_kernel_type.layout(), - expected_kernel_type.dtype()); - } - return phi::KernelKey( - tensor.place(), tensor.layout(), expected_kernel_type.dtype()); - } -}; - -template -class InterpolateV2GradMaker : public framework::SingleGradOpMaker { - public: - using framework::SingleGradOpMaker::SingleGradOpMaker; - - protected: - void Apply(GradOpPtr op) const override { - 
op->SetType(this->ForwardOpType() + "_grad"); - op->SetInput("X", this->Input("X")); - if (this->HasInput("SizeTensor") > 0) { - op->SetInput("SizeTensor", this->Input("SizeTensor")); - } - if (this->HasInput("OutSize") > 0) { - op->SetInput("OutSize", this->Input("OutSize")); - } - if (this->HasInput("Scale") > 0) { - op->SetInput("Scale", this->Input("Scale")); - } - op->SetInput(framework::GradVarName("Out"), this->OutputGrad("Out")); - op->SetOutput(framework::GradVarName("X"), this->InputGrad("X")); - op->SetAttrMap(this->Attrs()); - } -}; - -DECLARE_NO_NEED_BUFFER_VARS_INFERER(InterpolateV2GradNoNeedBufferVarsInferer, - "X"); - -} // namespace operators -} // namespace paddle - -// interp_v2 support scale_factor whose input type is list, this operation is -// not -// compatible with interp_op, so a new one is added in paddle2.0 -namespace ops = paddle::operators; - -DECLARE_INFER_SHAPE_FUNCTOR(bilinear_interp_v2, - BilinearInterpInferShapeFunctor, - PD_INFER_META(phi::InterpolateInferMeta)); -DECLARE_INFER_SHAPE_FUNCTOR(nearest_interp_v2, - NearestInterpInferShapeFunctor, - PD_INFER_META(phi::InterpolateInferMeta)); -DECLARE_INFER_SHAPE_FUNCTOR(trilinear_interp_v2, - TrilinearInterpInferShapeFunctor, - PD_INFER_META(phi::InterpolateInferMeta)); -DECLARE_INFER_SHAPE_FUNCTOR(bicubic_interp_v2, - BicubicInterpInferShapeFunctor, - PD_INFER_META(phi::InterpolateInferMeta)); -DECLARE_INFER_SHAPE_FUNCTOR(linear_interp_v2, - LinearInterpInferShapeFunctor, - PD_INFER_META(phi::InterpolateInferMeta)); - -REGISTER_OPERATOR(bilinear_interp_v2, - ops::InterpolateV2Op, - ops::InterpolateV2OpMaker, - ops::InterpolateV2GradMaker, - ops::InterpolateV2GradMaker, - BilinearInterpInferShapeFunctor); -REGISTER_OPERATOR(bilinear_interp_v2_grad, - ops::InterpolateV2OpGrad, - ops::InterpolateV2GradNoNeedBufferVarsInferer); -REGISTER_OPERATOR(nearest_interp_v2, - ops::InterpolateV2Op, - ops::InterpolateV2OpMaker, - ops::InterpolateV2GradMaker, - ops::InterpolateV2GradMaker, - 
NearestInterpInferShapeFunctor); -REGISTER_OPERATOR(nearest_interp_v2_grad, - ops::InterpolateV2OpGrad, - ops::InterpolateV2GradNoNeedBufferVarsInferer); -REGISTER_OPERATOR(trilinear_interp_v2, - ops::InterpolateV2Op, - ops::InterpolateV2OpMaker, - ops::InterpolateV2GradMaker, - ops::InterpolateV2GradMaker, - TrilinearInterpInferShapeFunctor); -REGISTER_OPERATOR(trilinear_interp_v2_grad, - ops::InterpolateV2OpGrad, - ops::InterpolateV2GradNoNeedBufferVarsInferer); -REGISTER_OPERATOR(bicubic_interp_v2, - ops::InterpolateV2Op, - ops::InterpolateV2OpMaker, - ops::InterpolateV2GradMaker, - ops::InterpolateV2GradMaker, - BicubicInterpInferShapeFunctor); -REGISTER_OPERATOR(bicubic_interp_v2_grad, - ops::InterpolateV2OpGrad, - ops::InterpolateV2GradNoNeedBufferVarsInferer); -REGISTER_OPERATOR(linear_interp_v2, - ops::InterpolateV2Op, - ops::InterpolateV2OpMaker, - ops::InterpolateV2GradMaker, - ops::InterpolateV2GradMaker, - LinearInterpInferShapeFunctor); -REGISTER_OPERATOR(linear_interp_v2_grad, - ops::InterpolateV2OpGrad, - ops::InterpolateV2GradNoNeedBufferVarsInferer); diff --git a/paddle/phi/api/yaml/backward.yaml b/paddle/phi/api/yaml/backward.yaml index ee75d281b97da..563a2f63704d1 100644 --- a/paddle/phi/api/yaml/backward.yaml +++ b/paddle/phi/api/yaml/backward.yaml @@ -118,6 +118,36 @@ func : atanh_grad inplace : (out_grad -> x_grad) +- backward_op : bicubic_interp_grad + forward : bicubic_interp (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, str data_layout="NCHW", int out_d=0, int out_h=0, int out_w=0, float[] scale={}, str interp_method="bilinear", bool align_corners=true, int align_mode=1) -> Tensor(output) + args : (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, Tensor output_grad, str data_layout, int out_d, int out_h, int out_w, float[] scale, str interp_method, bool align_corners, int align_mode) + output : Tensor(x_grad) + infer_meta : + func : UnchangedInferMeta + param: [x] + optional: out_size, 
size_tensor, scale_tensor + no_need_buffer : x + kernel : + func : bicubic_interp_grad + data_type : output_grad + data_transform : + skip_transform : out_size, size_tensor, scale_tensor + +- backward_op : bilinear_interp_grad + forward : bilinear_interp (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, str data_layout="NCHW", int out_d=0, int out_h=0, int out_w=0, float[] scale={}, str interp_method="bilinear", bool align_corners=true, int align_mode=1) -> Tensor(output) + args : (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, Tensor output_grad, str data_layout, int out_d, int out_h, int out_w, float[] scale, str interp_method, bool align_corners, int align_mode) + output : Tensor(x_grad) + infer_meta : + func : UnchangedInferMeta + param: [x] + no_need_buffer : x + optional: out_size, size_tensor, scale_tensor + kernel : + func : bilinear_interp_grad + data_type : output_grad + data_transform : + skip_transform : out_size, size_tensor, scale_tensor + - backward_op : bmm_grad forward : bmm (Tensor x, Tensor y) -> Tensor(out) args : (Tensor x, Tensor y, Tensor out_grad) @@ -738,6 +768,21 @@ kernel : func : lgamma_grad +- backward_op : linear_interp_grad + forward : linear_interp (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, str data_layout="NCHW", int out_d=0, int out_h=0, int out_w=0, float[] scale={}, str interp_method="bilinear", bool align_corners=true, int align_mode=1) -> Tensor(output) + args : (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, Tensor output_grad, str data_layout, int out_d, int out_h, int out_w, float[] scale, str interp_method, bool align_corners, int align_mode) + output : Tensor(x_grad) + infer_meta : + func : UnchangedInferMeta + param: [x] + optional: out_size, size_tensor, scale_tensor + no_need_buffer : x + kernel : + func : linear_interp_grad + data_type : output_grad + data_transform : + skip_transform : out_size, size_tensor, scale_tensor + - 
backward_op : log10_grad forward : log10 (Tensor x) -> Tensor(out) args : (Tensor x, Tensor out_grad) @@ -917,6 +962,21 @@ kernel : func : mv_grad +- backward_op : nearest_interp_grad + forward : nearest_interp (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, str data_layout="NCHW", int out_d=0, int out_h=0, int out_w=0, float[] scale={}, str interp_method="bilinear", bool align_corners=true, int align_mode=1) -> Tensor(output) + args : (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, Tensor output_grad, str data_layout, int out_d, int out_h, int out_w, float[] scale, str interp_method, bool align_corners, int align_mode) + output : Tensor(x_grad) + infer_meta : + func : UnchangedInferMeta + param: [x] + optional: out_size, size_tensor, scale_tensor + no_need_buffer : x + kernel : + func : nearest_interp_grad + data_type : output_grad + data_transform : + skip_transform : out_size, size_tensor, scale_tensor + - backward_op : nll_loss_grad forward : nll_loss (Tensor input, Tensor label, Tensor weight, int64_t ignore_index = -100, str reduction = "mean") -> Tensor(out), Tensor(total_weight) args : (Tensor input, Tensor label, Tensor weight, Tensor total_weight, Tensor out_grad, int64_t ignore_index, str reduction) @@ -1537,6 +1597,21 @@ data_type : out_grad no_need_buffer : x +- backward_op : trilinear_interp_grad + forward : trilinear_interp (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, str data_layout="NCHW", int out_d=0, int out_h=0, int out_w=0, float[] scale={}, str interp_method="bilinear", bool align_corners=true, int align_mode=1) -> Tensor(output) + args : (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, Tensor output_grad, str data_layout, int out_d, int out_h, int out_w, float[] scale, str interp_method, bool align_corners, int align_mode) + output : Tensor(x_grad) + infer_meta : + func : UnchangedInferMeta + param: [x] + optional: out_size, size_tensor, scale_tensor 
+ no_need_buffer : x + kernel : + func : trilinear_interp_grad + data_type : output_grad + data_transform : + skip_transform : out_size, size_tensor, scale_tensor + - backward_op : trunc_grad forward : trunc (Tensor input) -> Tensor(out) args : (Tensor out_grad) diff --git a/paddle/phi/api/yaml/legacy_backward.yaml b/paddle/phi/api/yaml/legacy_backward.yaml index 2a90272ad5f9c..750ea13071de5 100755 --- a/paddle/phi/api/yaml/legacy_backward.yaml +++ b/paddle/phi/api/yaml/legacy_backward.yaml @@ -142,30 +142,6 @@ func : bce_loss_grad inplace : (out_grad -> input_grad) -- backward_op : bicubic_interp_grad - forward : bicubic_interp (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, str data_layout, int out_d, int out_h, int out_w, float[] scale, str interp_method, bool align_corners, int align_mode) -> Tensor(output) - args : (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, Tensor output_grad, str data_layout, int out_d, int out_h, int out_w, float[] scale, str interp_method, bool align_corners, int align_mode) - output : Tensor(x_grad) - infer_meta : - func : UnchangedInferMeta - param: [x] - optional: out_size, size_tensor, scale_tensor - kernel : - func : bicubic_interp_grad - data_type : output_grad - -- backward_op : bilinear_interp_grad - forward : bilinear_interp (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, str data_layout, int out_d, int out_h, int out_w, float[] scale, str interp_method, bool align_corners, int align_mode) -> Tensor(output) - args : (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, Tensor output_grad, str data_layout, int out_d, int out_h, int out_w, float[] scale, str interp_method, bool align_corners, int align_mode) - output : Tensor(x_grad) - infer_meta : - func : UnchangedInferMeta - param: [x] - optional: out_size, size_tensor, scale_tensor - kernel : - func : bilinear_interp_grad - data_type : output_grad - - backward_op : 
bilinear_tensor_product_grad forward : bilinear_tensor_product (Tensor x, Tensor y, Tensor weight, Tensor bias) -> Tensor(out) args : (Tensor x, Tensor y, Tensor weight, Tensor out_grad) @@ -649,18 +625,6 @@ no_need_buffer : bias optional : scale, bias -- backward_op : linear_interp_grad - forward : linear_interp (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, str data_layout, int out_d, int out_h, int out_w, float[] scale, str interp_method, bool align_corners, int align_mode) -> Tensor(output) - args : (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, Tensor output_grad, str data_layout, int out_d, int out_h, int out_w, float[] scale, str interp_method, bool align_corners, int align_mode) - output : Tensor(x_grad) - infer_meta : - func : UnchangedInferMeta - param: [x] - optional: out_size, size_tensor, scale_tensor - kernel : - func : linear_interp_grad - data_type : output_grad - - backward_op : log_softmax_grad forward : log_softmax(Tensor x, int axis) -> Tensor(out) args : (Tensor out, Tensor out_grad, int axis) @@ -878,18 +842,6 @@ func : multiply_triple_grad optional : fwd_grad_grad_x, fwd_grad_grad_y, grad_x_grad, grad_y_grad, grad_grad_out_grad -- backward_op : nearest_interp_grad - forward : nearest_interp (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, str data_layout, int out_d, int out_h, int out_w, float[] scale, str interp_method, bool align_corners, int align_mode) -> Tensor(output) - args : (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, Tensor output_grad, str data_layout, int out_d, int out_h, int out_w, float[] scale, str interp_method, bool align_corners, int align_mode) - output : Tensor(x_grad) - infer_meta : - func : UnchangedInferMeta - param: [x] - optional: out_size, size_tensor, scale_tensor - kernel : - func : nearest_interp_grad - data_type : output_grad - - backward_op : norm_grad forward : norm (Tensor x, int axis, float epsilon, bool is_test) 
-> Tensor(out), Tensor(norm) args : (Tensor x, Tensor norm, Tensor out_grad, int axis, float epsilon, bool is_test) @@ -1363,18 +1315,6 @@ kernel : func : tril_grad -- backward_op : trilinear_interp_grad - forward : trilinear_interp (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, str data_layout, int out_d, int out_h, int out_w, float[] scale, str interp_method, bool align_corners, int align_mode) -> Tensor(output) - args : (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, Tensor output_grad, str data_layout, int out_d, int out_h, int out_w, float[] scale, str interp_method, bool align_corners, int align_mode) - output : Tensor(x_grad) - infer_meta : - func : UnchangedInferMeta - param: [x] - optional: out_size, size_tensor, scale_tensor - kernel : - func : trilinear_interp_grad - data_type : output_grad - - backward_op : triu_grad forward : triu(Tensor x, int diagonal) -> Tensor(out) args : (Tensor out_grad, int diagonal) diff --git a/paddle/phi/api/yaml/legacy_ops.yaml b/paddle/phi/api/yaml/legacy_ops.yaml index af77c6f903d49..e767bd4cbdc52 100755 --- a/paddle/phi/api/yaml/legacy_ops.yaml +++ b/paddle/phi/api/yaml/legacy_ops.yaml @@ -236,28 +236,6 @@ func : bce_loss backward : bce_loss_grad -- op : bicubic_interp - args : (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, str data_layout, int out_d, int out_h, int out_w, float[] scale, str interp_method, bool align_corners, int align_mode) - output : Tensor(output) - infer_meta : - func : InterpolateInferMeta - optional: out_size, size_tensor, scale_tensor - kernel : - func : bicubic_interp - data_type : x - backward : bicubic_interp_grad - -- op : bilinear_interp - args : (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, str data_layout, int out_d, int out_h, int out_w, float[] scale, str interp_method, bool align_corners, int align_mode) - output : Tensor(output) - infer_meta : - func : InterpolateInferMeta - optional: 
out_size, size_tensor, scale_tensor - kernel : - func : bilinear_interp - data_type : x - backward : bilinear_interp_grad - - op : bilinear_tensor_product args : (Tensor x, Tensor y, Tensor weight, Tensor bias) output : Tensor @@ -928,17 +906,6 @@ kernel : func : less_than -- op : linear_interp - args : (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, str data_layout, int out_d, int out_h, int out_w, float[] scale, str interp_method, bool align_corners, int align_mode) - output : Tensor(output) - infer_meta : - func : InterpolateInferMeta - optional: out_size, size_tensor, scale_tensor - kernel : - func : linear_interp - data_type : x - backward : linear_interp_grad - - op : linspace args : (Tensor start, Tensor stop, Tensor number, DataType dtype, Place place) output : Tensor(out) @@ -1226,17 +1193,6 @@ multiply_sr {selected_rows, dense -> selected_rows} backward : multiply_grad -- op : nearest_interp - args : (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, str data_layout, int out_d, int out_h, int out_w, float[] scale, str interp_method, bool align_corners, int align_mode) - output : Tensor(output) - infer_meta : - func : InterpolateInferMeta - optional: out_size, size_tensor, scale_tensor - kernel : - func : nearest_interp - data_type : x - backward : nearest_interp_grad - - op : nms args : (Tensor x, float threshold) output : Tensor(out) @@ -1727,17 +1683,6 @@ data_type : dtype backend : place -- op : trilinear_interp - args : (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, str data_layout, int out_d, int out_h, int out_w, float[] scale, str interp_method, bool align_corners, int align_mode) - output : Tensor(output) - infer_meta : - func : InterpolateInferMeta - optional: out_size, size_tensor, scale_tensor - kernel : - func : trilinear_interp - data_type : x - backward : trilinear_interp_grad - - op : triu args : (Tensor x, int diagonal) output : Tensor(out) diff --git 
a/paddle/phi/api/yaml/op_compat.yaml b/paddle/phi/api/yaml/op_compat.yaml index 1eb10cf637c9c..cf58c190c4d95 100644 --- a/paddle/phi/api/yaml/op_compat.yaml +++ b/paddle/phi/api/yaml/op_compat.yaml @@ -163,11 +163,27 @@ - op : bicubic_interp (bicubic_interp_v2) backward : bicubic_interp_grad (bicubic_interp_v2_grad) + inputs : + {x : X, out_size : OutSize, size_tensor : SizeTensor, scale_tensor : Scale} + outputs : + output : Out + inputs : + {x : X, out_size : OutSize, size_tensor : SizeTensor, scale_tensor : Scale} + outputs : + output : Out extra : attrs : [bool use_mkldnn = false] - op : bilinear_interp (bilinear_interp_v2) backward : bilinear_interp_grad (bilinear_interp_v2_grad) + inputs : + {x : X, out_size : OutSize, size_tensor : SizeTensor, scale_tensor : Scale} + outputs : + output : Out + inputs : + {x : X, out_size : OutSize, size_tensor : SizeTensor, scale_tensor : Scale} + outputs : + output : Out extra : attrs : [bool use_mkldnn = false] @@ -941,6 +957,10 @@ - op : linear_interp (linear_interp_v2) backward : linear_interp_grad (linear_interp_v2_grad) + inputs : + {x : X, out_size : OutSize, size_tensor : SizeTensor, scale_tensor : Scale} + outputs : + output : Out extra : attrs : [bool use_mkldnn = false] @@ -1163,6 +1183,10 @@ - op : nearest_interp (nearest_interp_v2) backward : nearest_interp_grad (nearest_interp_v2_grad) + inputs : + {x : X, out_size : OutSize, size_tensor : SizeTensor, scale_tensor : Scale} + outputs : + output : Out extra : attrs : [bool use_mkldnn = false] @@ -1752,6 +1776,10 @@ - op : trilinear_interp (trilinear_interp_v2) backward : trilinear_interp_grad (trilinear_interp_v2_grad) + inputs : + {x : X, out_size : OutSize, size_tensor : SizeTensor, scale_tensor : Scale} + outputs : + output : Out extra : attrs : [bool use_mkldnn = false] diff --git a/paddle/phi/api/yaml/ops.yaml b/paddle/phi/api/yaml/ops.yaml index 02edf19a75d50..64b71b827f557 100644 --- a/paddle/phi/api/yaml/ops.yaml +++ b/paddle/phi/api/yaml/ops.yaml @@ 
-125,6 +125,32 @@ kernel : func : bernoulli +- op : bicubic_interp + args : (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, str data_layout="NCHW", int out_d=0, int out_h=0, int out_w=0, float[] scale={}, str interp_method="bilinear", bool align_corners=true, int align_mode=1) + output : Tensor(output) + infer_meta : + func : InterpolateInferMeta + optional: out_size, size_tensor, scale_tensor + kernel : + func : bicubic_interp + data_type : x + backward : bicubic_interp_grad + data_transform : + skip_transform : out_size, size_tensor, scale_tensor + +- op : bilinear_interp + args : (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, str data_layout="NCHW", int out_d=0, int out_h=0, int out_w=0, float[] scale={}, str interp_method="bilinear", bool align_corners=true, int align_mode=1) + output : Tensor(output) + infer_meta : + func : InterpolateInferMeta + optional: out_size, size_tensor, scale_tensor + kernel : + func : bilinear_interp + data_type : x + backward : bilinear_interp_grad + data_transform : + skip_transform : out_size, size_tensor, scale_tensor + - op : bitwise_and args : (Tensor x, Tensor y) output : Tensor(out) @@ -748,6 +774,19 @@ func : lgamma backward : lgamma_grad +- op : linear_interp + args : (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, str data_layout="NCHW", int out_d=0, int out_h=0, int out_w=0, float[] scale={}, str interp_method="bilinear", bool align_corners=true, int align_mode=1) + output : Tensor(output) + infer_meta : + func : InterpolateInferMeta + optional: out_size, size_tensor, scale_tensor + kernel : + func : linear_interp + data_type : x + backward : linear_interp_grad + data_transform : + skip_transform : out_size, size_tensor, scale_tensor + - op : log args : (Tensor x) output : Tensor @@ -906,6 +945,19 @@ func : mv backward : mv_grad +- op : nearest_interp + args : (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, str data_layout="NCHW", 
int out_d=0, int out_h=0, int out_w=0, float[] scale={}, str interp_method="bilinear", bool align_corners=true, int align_mode=1) + output : Tensor(output) + infer_meta : + func : InterpolateInferMeta + optional: out_size, size_tensor, scale_tensor + kernel : + func : nearest_interp + data_type : x + backward : nearest_interp_grad + data_transform : + skip_transform : out_size, size_tensor, scale_tensor + - op : nll_loss args : (Tensor input, Tensor label, Tensor weight, int64_t ignore_index = -100, str reduction = "mean") output : Tensor(out), Tensor(total_weight) @@ -1374,6 +1426,19 @@ func : trace backward : trace_grad +- op : trilinear_interp + args : (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, str data_layout="NCHW", int out_d=0, int out_h=0, int out_w=0, float[] scale={}, str interp_method="bilinear", bool align_corners=true, int align_mode=1) + output : Tensor(output) + infer_meta : + func : InterpolateInferMeta + optional: out_size, size_tensor, scale_tensor + kernel : + func : trilinear_interp + data_type : x + backward : trilinear_interp_grad + data_transform : + skip_transform : out_size, size_tensor, scale_tensor + - op : trunc args : (Tensor input) output : Tensor diff --git a/paddle/phi/core/infer_varkernel_utils.h b/paddle/phi/core/infer_varkernel_utils.h new file mode 100644 index 0000000000000..037d976003f4b --- /dev/null +++ b/paddle/phi/core/infer_varkernel_utils.h @@ -0,0 +1,67 @@ +// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include +#include + +#include "paddle/phi/core/attribute.h" +#include "paddle/phi/core/device_context.h" +#include "paddle/phi/core/enforce.h" +#include "paddle/phi/core/tensor_base.h" +#include "paddle/phi/core/tensor_utils.h" +#include "paddle/phi/core/type_defs.h" +#include "paddle/utils/optional.h" +#include "paddle/utils/small_vector.h" + +namespace phi { + +class KernelKey; + +/** + * Note: InferVarKernelContext is only designed to MKLDNN kernel when the + * related member function 'GetKernelTypeFor' is special. + */ +class InferVarKernelContext { + public: + InferVarKernelContext() = default; + InferVarKernelContext(const InferVarKernelContext&) = default; + explicit InferVarKernelContext(const phi::KernelKey* kernel_key, + const AttributeMap* attrs) + : kernel_key_(kernel_key), attrs_(attrs) {} + + const std::string& GetVarName(void) const { return *var_name_; } + + const DenseTensor& GetTensor(void) const { return *tensor_; } + + const KernelKey& GetKernelKey(void) const { return *kernel_key_; } + + const AttributeMap& GetAttrs(void) const { return *attrs_; } + + void SetVarName(std::string* var_name) { this->var_name_ = var_name; } + + void SetDenseTensor(DenseTensor* tensor) { this->tensor_ = tensor; } + + private: + const KernelKey* kernel_key_; + // Use AttributeMap in namespace 'phi' to avoid depending on 'fluid' + const AttributeMap* attrs_; + std::string* var_name_; + DenseTensor* tensor_; +}; + +typedef KernelKey (*InferVarKernelFn)(const InferVarKernelContext*); + +} // namespace phi diff --git a/paddle/phi/core/kernel_factory.h b/paddle/phi/core/kernel_factory.h index d69cb7802b6c6..2587aff0694a9 100644 --- a/paddle/phi/core/kernel_factory.h +++ b/paddle/phi/core/kernel_factory.h @@ -25,11 +25,11 @@ #include "paddle/phi/common/layout.h" #include "paddle/phi/core/compat/convert_utils.h" #include "paddle/phi/core/enforce.h" 
+#include "paddle/phi/core/infer_varkernel_utils.h" #include "paddle/phi/core/type_defs.h" #include "paddle/phi/core/utils/data_type.h" #include "paddle/utils/flat_hash_map.h" #include "paddle/utils/small_vector.h" - namespace phi { using DataType = paddle::experimental::DataType; @@ -286,6 +286,8 @@ class Kernel { return kernel_registered_type_; } + InferVarKernelFn infer_var_kernel_fn_{nullptr}; + private: KernelFn fn_{nullptr}; void* variadic_fn_ = nullptr; diff --git a/paddle/phi/kernels/onednn/interpolate_kernel.cc b/paddle/phi/kernels/onednn/interpolate_kernel.cc index 7f6ded1958f2d..ae68d051a1a30 100644 --- a/paddle/phi/kernels/onednn/interpolate_kernel.cc +++ b/paddle/phi/kernels/onednn/interpolate_kernel.cc @@ -15,11 +15,45 @@ #include "paddle/phi/kernels/interpolate_kernel.h" #include "paddle/phi/backends/onednn/onednn_reuse.h" +#include "paddle/phi/core/infer_varkernel_utils.h" #include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/kernels/funcs/interpolate_function.h" namespace phi { +phi::KernelKey InterpolateGetKernelTypeForVar( + const InferVarKernelContext* ctx) { + const std::string& var_name = ctx->GetVarName(); + const DenseTensor& tensor = ctx->GetTensor(); + const KernelKey& expected_kernel_type = ctx->GetKernelKey(); + const AttributeMap& attrs = ctx->GetAttrs(); + // Only input require reshaping, weights and + // bias are having shape in NCHW order + if ((expected_kernel_type.layout() == phi::DataLayout::ONEDNN) && + (tensor.layout() != phi::DataLayout::ONEDNN)) { + auto it = attrs.find("data_layout"); + PADDLE_ENFORCE_NE(it, + attrs.end(), + paddle::platform::errors::NotFound( + "Cannot find attribute %s.", "data_layout")); + const std::string data_layout = PADDLE_GET_CONST(std::string, it->second); + auto dl = phi::StringToDataLayout(data_layout); + // Some models may have intentionally set "AnyLayout" for pool + // op. 
Treat this as NCHW (default data_format value) + if (dl != phi::DataLayout::kAnyLayout) { + return phi::KernelKey(tensor.place(), dl, expected_kernel_type.dtype()); + } + } + if (var_name == "OutSize" || var_name == "SizeTensor" || + var_name == "Scale") { + return phi::KernelKey(phi::Backend::ALL_BACKEND, + expected_kernel_type.layout(), + expected_kernel_type.dtype()); + } + return phi::KernelKey( + tensor.place(), tensor.layout(), expected_kernel_type.dtype()); +} + namespace funcs { template class InterpolateOneDNNHandler @@ -232,7 +266,9 @@ PD_REGISTER_KERNEL(bilinear_interp, ONEDNN, phi::BilinearInterpKernel, float, - phi::dtype::bfloat16) {} + phi::dtype::bfloat16) { + kernel->infer_var_kernel_fn_ = phi::InterpolateGetKernelTypeForVar; +} PD_REGISTER_KERNEL(nearest_interp, OneDNN, @@ -241,4 +277,6 @@ PD_REGISTER_KERNEL(nearest_interp, float, phi::dtype::bfloat16, int8_t, - uint8_t) {} + uint8_t) { + kernel->infer_var_kernel_fn_ = phi::InterpolateGetKernelTypeForVar; +} diff --git a/paddle/phi/ops/compat/interpolate_sig.cc b/paddle/phi/ops/compat/interpolate_sig.cc deleted file mode 100644 index 1a1a1cbb6670c..0000000000000 --- a/paddle/phi/ops/compat/interpolate_sig.cc +++ /dev/null @@ -1,201 +0,0 @@ -/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
*/ - -#include "paddle/phi/core/compat/op_utils.h" - -namespace phi { - -KernelSignature BilinearInterpOpArgumentMapping( - const ArgumentMappingContext& ctx) { - return KernelSignature("bilinear_interp", - {"X", "OutSize", "SizeTensor", "Scale"}, - {"data_layout", - "out_d", - "out_h", - "out_w", - "scale", - "interp_method", - "align_corners", - "align_mode"}, - {"Out"}); -} - -KernelSignature NearestInterpOpArgumentMapping( - const ArgumentMappingContext& ctx) { - return KernelSignature("nearest_interp", - {"X", "OutSize", "SizeTensor", "Scale"}, - {"data_layout", - "out_d", - "out_h", - "out_w", - "scale", - "interp_method", - "align_corners", - "align_mode"}, - {"Out"}); -} -KernelSignature TrilinearInterpOpArgumentMapping( - const ArgumentMappingContext& ctx) { - return KernelSignature("trilinear_interp", - {"X", "OutSize", "SizeTensor", "Scale"}, - {"data_layout", - "out_d", - "out_h", - "out_w", - "scale", - "interp_method", - "align_corners", - "align_mode"}, - {"Out"}); -} - -KernelSignature LinearInterpOpArgumentMapping( - const ArgumentMappingContext& ctx) { - return KernelSignature("linear_interp", - {"X", "OutSize", "SizeTensor", "Scale"}, - {"data_layout", - "out_d", - "out_h", - "out_w", - "scale", - "interp_method", - "align_corners", - "align_mode"}, - {"Out"}); -} - -KernelSignature BicubicInterpOpArgumentMapping( - const ArgumentMappingContext& ctx) { - return KernelSignature("bicubic_interp", - {"X", "OutSize", "SizeTensor", "Scale"}, - {"data_layout", - "out_d", - "out_h", - "out_w", - "scale", - "interp_method", - "align_corners", - "align_mode"}, - {"Out"}); -} - -KernelSignature BilinearInterpGradOpArgumentMapping( - const ArgumentMappingContext& ctx) { - return KernelSignature("bilinear_interp_grad", - {"X", "OutSize", "SizeTensor", "Scale", "Out@GRAD"}, - {"data_layout", - "out_d", - "out_h", - "out_w", - "scale", - "interp_method", - "align_corners", - "align_mode"}, - {"X@GRAD"}); -} - -KernelSignature 
NearestInterpGradOpArgumentMapping( - const ArgumentMappingContext& ctx) { - return KernelSignature("nearest_interp_grad", - {"X", "OutSize", "SizeTensor", "Scale", "Out@GRAD"}, - {"data_layout", - "out_d", - "out_h", - "out_w", - "scale", - "interp_method", - "align_corners", - "align_mode"}, - {"X@GRAD"}); -} -KernelSignature TrilinearInterpGradOpArgumentMapping( - const ArgumentMappingContext& ctx) { - return KernelSignature("trilinear_interp_grad", - {"X", "OutSize", "SizeTensor", "Scale", "Out@GRAD"}, - {"data_layout", - "out_d", - "out_h", - "out_w", - "scale", - "interp_method", - "align_corners", - "align_mode"}, - {"X@GRAD"}); -} - -KernelSignature LinearInterpGradOpArgumentMapping( - const ArgumentMappingContext& ctx) { - return KernelSignature("linear_interp_grad", - {"X", "OutSize", "SizeTensor", "Scale", "Out@GRAD"}, - {"data_layout", - "out_d", - "out_h", - "out_w", - "scale", - "interp_method", - "align_corners", - "align_mode"}, - {"X@GRAD"}); -} - -KernelSignature BicubicInterpGradOpArgumentMapping( - const ArgumentMappingContext& ctx) { - return KernelSignature("bicubic_interp_grad", - {"X", "OutSize", "SizeTensor", "Scale", "Out@GRAD"}, - {"data_layout", - "out_d", - "out_h", - "out_w", - "scale", - "interp_method", - "align_corners", - "align_mode"}, - {"X@GRAD"}); -} - -} // namespace phi - -PD_REGISTER_BASE_KERNEL_NAME(linear_interp_v2, linear_interp); -PD_REGISTER_BASE_KERNEL_NAME(bilinear_interp_v2, bilinear_interp); -PD_REGISTER_BASE_KERNEL_NAME(trilinear_interp_v2, trilinear_interp); -PD_REGISTER_BASE_KERNEL_NAME(nearest_interp_v2, nearest_interp); -PD_REGISTER_BASE_KERNEL_NAME(bicubic_interp_v2, bicubic_interp); - -PD_REGISTER_BASE_KERNEL_NAME(linear_interp_v2_grad, linear_interp_grad); -PD_REGISTER_BASE_KERNEL_NAME(bilinear_interp_v2_grad, bilinear_interp_grad); -PD_REGISTER_BASE_KERNEL_NAME(trilinear_interp_v2_grad, trilinear_interp_grad); -PD_REGISTER_BASE_KERNEL_NAME(nearest_interp_v2_grad, nearest_interp_grad); 
-PD_REGISTER_BASE_KERNEL_NAME(bicubic_interp_v2_grad, bicubic_interp_grad); - -PD_REGISTER_ARG_MAPPING_FN(bilinear_interp_v2, - phi::BilinearInterpOpArgumentMapping); -PD_REGISTER_ARG_MAPPING_FN(nearest_interp_v2, - phi::NearestInterpOpArgumentMapping); -PD_REGISTER_ARG_MAPPING_FN(trilinear_interp_v2, - phi::TrilinearInterpOpArgumentMapping); -PD_REGISTER_ARG_MAPPING_FN(linear_interp_v2, - phi::LinearInterpOpArgumentMapping); -PD_REGISTER_ARG_MAPPING_FN(bicubic_interp_v2, - phi::BicubicInterpOpArgumentMapping); - -PD_REGISTER_ARG_MAPPING_FN(bilinear_interp_v2_grad, - phi::BilinearInterpGradOpArgumentMapping); -PD_REGISTER_ARG_MAPPING_FN(nearest_interp_v2_grad, - phi::NearestInterpGradOpArgumentMapping); -PD_REGISTER_ARG_MAPPING_FN(trilinear_interp_v2_grad, - phi::TrilinearInterpGradOpArgumentMapping); -PD_REGISTER_ARG_MAPPING_FN(linear_interp_v2_grad, - phi::LinearInterpGradOpArgumentMapping); -PD_REGISTER_ARG_MAPPING_FN(bicubic_interp_v2_grad, - phi::BicubicInterpGradOpArgumentMapping); From 28e76869e02a9c9dd567a46e50a09b1d94c71316 Mon Sep 17 00:00:00 2001 From: lzydev <1528794076@qq.com> Date: Wed, 1 Mar 2023 11:16:54 +0000 Subject: [PATCH 02/12] fix the bug in 'BuildInferVarKernelContext' --- paddle/fluid/framework/data_transform.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/paddle/fluid/framework/data_transform.cc b/paddle/fluid/framework/data_transform.cc index 7f403e607fedd..b404c395ef813 100644 --- a/paddle/fluid/framework/data_transform.cc +++ b/paddle/fluid/framework/data_transform.cc @@ -172,7 +172,7 @@ phi::InferVarKernelContext BuildInferVarKernelContext( } } } - return phi::InferVarKernelContext(&kernel_key, &phi_attrs); + return phi::InferVarKernelContext(&kernel_key, phi_attrs); } } // namespace framework From d6b6d1c9d6b1d623c087a9b4ae56e5722f9a3f50 Mon Sep 17 00:00:00 2001 From: lzydev <1528794076@qq.com> Date: Fri, 3 Mar 2023 06:35:58 +0000 Subject: [PATCH 03/12] add infer_varkernel_utils.cc --- 
paddle/fluid/framework/CMakeLists.txt | 6 ++-- paddle/phi/core/CMakeLists.txt | 3 ++ paddle/phi/core/infer_varkernel_utils.cc | 43 ++++++++++++++++++++++++ paddle/phi/core/infer_varkernel_utils.h | 23 ++++--------- paddle/phi/kernels/CMakeLists.txt | 6 +++- paddle/scripts/paddle_build.sh | 0 6 files changed, 62 insertions(+), 19 deletions(-) create mode 100644 paddle/phi/core/infer_varkernel_utils.cc mode change 100644 => 100755 paddle/scripts/paddle_build.sh diff --git a/paddle/fluid/framework/CMakeLists.txt b/paddle/fluid/framework/CMakeLists.txt index 1f310222e013b..3990a8179cc04 100755 --- a/paddle/fluid/framework/CMakeLists.txt +++ b/paddle/fluid/framework/CMakeLists.txt @@ -479,7 +479,8 @@ if(WITH_XPU) kernel_factory infershape_utils op_utils - op_compat_infos) + op_compat_infos + infer_varkernel_utils) else() cc_library( operator @@ -505,7 +506,8 @@ else() kernel_factory infershape_utils op_utils - op_compat_infos) + op_compat_infos + infer_varkernel_utils) endif() cc_test( diff --git a/paddle/phi/core/CMakeLists.txt b/paddle/phi/core/CMakeLists.txt index e413ee4ef2ac6..239e61257e60a 100644 --- a/paddle/phi/core/CMakeLists.txt +++ b/paddle/phi/core/CMakeLists.txt @@ -101,6 +101,9 @@ cc_library( infermeta_utils SRCS infermeta_utils.cc DEPS meta_tensor) + +cc_library(infer_varkernel_utils SRCS infer_varkernel_utils.cc) + cc_library( selected_rows SRCS selected_rows_impl.cc selected_rows.cc diff --git a/paddle/phi/core/infer_varkernel_utils.cc b/paddle/phi/core/infer_varkernel_utils.cc new file mode 100644 index 0000000000000..ffb467bbd2129 --- /dev/null +++ b/paddle/phi/core/infer_varkernel_utils.cc @@ -0,0 +1,43 @@ +// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/phi/core/infer_varkernel_utils.h" + +namespace phi { + +const std::string& InferVarKernelContext::GetVarName(void) const { + return *var_name_; +} + +const DenseTensor& InferVarKernelContext::GetTensor(void) const { + return *tensor_; +} + +const KernelKey& InferVarKernelContext::GetKernelKey(void) const { + return *kernel_key_; +} + +const AttributeMap& InferVarKernelContext::GetAttrs(void) const { + return *attrs_; +} + +void InferVarKernelContext::SetVarName(std::string* var_name) { + this->var_name_ = var_name; +} + +void InferVarKernelContext::SetDenseTensor(DenseTensor* tensor) { + this->tensor_ = tensor; +} + +} // namespace phi diff --git a/paddle/phi/core/infer_varkernel_utils.h b/paddle/phi/core/infer_varkernel_utils.h index 037d976003f4b..af58886b07be5 100644 --- a/paddle/phi/core/infer_varkernel_utils.h +++ b/paddle/phi/core/infer_varkernel_utils.h @@ -14,22 +14,13 @@ #pragma once -#include -#include - #include "paddle/phi/core/attribute.h" -#include "paddle/phi/core/device_context.h" #include "paddle/phi/core/enforce.h" -#include "paddle/phi/core/tensor_base.h" -#include "paddle/phi/core/tensor_utils.h" -#include "paddle/phi/core/type_defs.h" -#include "paddle/utils/optional.h" -#include "paddle/utils/small_vector.h" namespace phi { class KernelKey; - +class DenseTensor; /** * Note: InferVarKernelContext is only designed to MKLDNN kernel when the * related member function 'GetKernelTypeFor' is special. 
@@ -42,17 +33,17 @@ class InferVarKernelContext { const AttributeMap* attrs) : kernel_key_(kernel_key), attrs_(attrs) {} - const std::string& GetVarName(void) const { return *var_name_; } + const std::string& GetVarName(void) const; - const DenseTensor& GetTensor(void) const { return *tensor_; } + const DenseTensor& GetTensor(void) const; - const KernelKey& GetKernelKey(void) const { return *kernel_key_; } + const KernelKey& GetKernelKey(void) const; - const AttributeMap& GetAttrs(void) const { return *attrs_; } + const AttributeMap& GetAttrs(void) const; - void SetVarName(std::string* var_name) { this->var_name_ = var_name; } + void SetVarName(std::string* var_name); - void SetDenseTensor(DenseTensor* tensor) { this->tensor_ = tensor; } + void SetDenseTensor(DenseTensor* tensor); private: const KernelKey* kernel_key_; diff --git a/paddle/phi/kernels/CMakeLists.txt b/paddle/phi/kernels/CMakeLists.txt index e18425104213d..66d1294fdac23 100644 --- a/paddle/phi/kernels/CMakeLists.txt +++ b/paddle/phi/kernels/CMakeLists.txt @@ -146,7 +146,11 @@ file(GLOB kernel_xpu "xpu/*.cc" "selected_rows/xpu/*.cc" "fusion/xpu/*.cc") add_library(phi_cpu ${kernel_cc}) kernel_declare("${kernel_cc}") -target_link_libraries(phi_cpu ${COMMON_KERNEL_DEPS}) +if(WITH_MKLDNN) + target_link_libraries(phi_cpu ${COMMON_KERNEL_DEPS} infer_varkernel_utils) +else() + target_link_libraries(phi_cpu ${COMMON_KERNEL_DEPS}) +endif() set(ADD_PHI_KERNELS phi_cpu) diff --git a/paddle/scripts/paddle_build.sh b/paddle/scripts/paddle_build.sh old mode 100644 new mode 100755 From 47e4ad23ea650e66adb381f0e34a27012df5da1c Mon Sep 17 00:00:00 2001 From: lzydev <1528794076@qq.com> Date: Fri, 3 Mar 2023 09:35:11 +0000 Subject: [PATCH 04/12] fix the bug:the first two parametes of 'BuildInferVarKernelContext' can't be template variable --- .../new_executor/interpreter/data_transfer.cc | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git 
a/paddle/fluid/framework/new_executor/interpreter/data_transfer.cc b/paddle/fluid/framework/new_executor/interpreter/data_transfer.cc index 536a8ec66c0f0..4b2a87068862a 100644 --- a/paddle/fluid/framework/new_executor/interpreter/data_transfer.cc +++ b/paddle/fluid/framework/new_executor/interpreter/data_transfer.cc @@ -483,12 +483,11 @@ void ApplyDataTransform(const OpKernelType& expected_kernel_key, phi::AttributeMap infer_attrs{}; auto fluid_attrs = static_cast(op_base)->Attrs(); + auto phi_kernelkey = + framework::TransOpKernelTypeToPhiKernelKey(expected_kernel_key); phi::InferVarKernelContext infer_varkernel_context = BuildInferVarKernelContext( - framework::TransOpKernelTypeToPhiKernelKey(expected_kernel_key), - fluid_attrs, - &infer_attrs, - has_infer_varkernel_fn); + phi_kernelkey, fluid_attrs, &infer_attrs, has_infer_varkernel_fn); auto apply_data_transform_for_one_parameter = [&](const std::string& parameter_name, const std::vector& argument_names, @@ -565,10 +564,7 @@ void ApplyDataTransform(const OpKernelType& expected_kernel_key, auto kernel_key_for_var = static_cast(op_base) ->GetKernelTypeForVar( - parameter_name, - *tensor_in, - framework::TransOpKernelTypeToPhiKernelKey( - expected_kernel_key)); + parameter_name, *tensor_in, phi_kernelkey); if (has_infer_varkernel_fn) { infer_varkernel_context.SetVarName( const_cast(¶meter_name)); From 77c66d65e86b355fe94559550123fd54cbddb422 Mon Sep 17 00:00:00 2001 From: lzydev <1528794076@qq.com> Date: Mon, 6 Mar 2023 11:07:25 +0000 Subject: [PATCH 05/12] change the code according to first review --- paddle/fluid/framework/CMakeLists.txt | 4 +-- paddle/fluid/framework/data_transform.cc | 15 +++++++--- paddle/fluid/framework/data_transform.h | 4 +-- .../new_executor/interpreter/data_transfer.cc | 10 +++---- paddle/fluid/framework/operator.cc | 18 +++++------ paddle/phi/api/yaml/op_compat.yaml | 8 ----- paddle/phi/core/CMakeLists.txt | 2 -- paddle/phi/core/compat/CMakeLists.txt | 1 + 
.../get_kerneltype_forvar_utils.cc} | 16 +++++----- .../get_kerneltype_forvar_utils.h} | 30 +++++++++---------- paddle/phi/core/kernel_factory.h | 4 +-- paddle/phi/kernels/CMakeLists.txt | 3 +- .../phi/kernels/onednn/interpolate_kernel.cc | 12 ++++---- 13 files changed, 61 insertions(+), 66 deletions(-) rename paddle/phi/core/{infer_varkernel_utils.cc => compat/get_kerneltype_forvar_utils.cc} (57%) rename paddle/phi/core/{infer_varkernel_utils.h => compat/get_kerneltype_forvar_utils.h} (54%) diff --git a/paddle/fluid/framework/CMakeLists.txt b/paddle/fluid/framework/CMakeLists.txt index 3990a8179cc04..b55a0d6cc0da0 100755 --- a/paddle/fluid/framework/CMakeLists.txt +++ b/paddle/fluid/framework/CMakeLists.txt @@ -480,7 +480,7 @@ if(WITH_XPU) infershape_utils op_utils op_compat_infos - infer_varkernel_utils) + get_kerneltype_forvar_utils) else() cc_library( operator @@ -507,7 +507,7 @@ else() infershape_utils op_utils op_compat_infos - infer_varkernel_utils) + get_kerneltype_forvar_utils) endif() cc_test( diff --git a/paddle/fluid/framework/data_transform.cc b/paddle/fluid/framework/data_transform.cc index b404c395ef813..7ac708ac9b0b1 100644 --- a/paddle/fluid/framework/data_transform.cc +++ b/paddle/fluid/framework/data_transform.cc @@ -156,23 +156,30 @@ void SetTensorToVariable(const Variable &in_var, } } -phi::InferVarKernelContext BuildInferVarKernelContext( +phi::GetKernelTypeForVarContext BuildGetKernelTypeForVarContext( const phi::KernelKey &kernel_key, const AttributeMap &fluid_attrs, phi::AttributeMap *phi_attrs, bool has_infer_varkernel_fn) { + // According to "GetKernelTypeForVar" in some ops that have MKLDNN codes, + // the only "string" member, such as "data_layout", "data_format" of + // AttributeMap is useful. In the future the other args may be used. Because the + // "phi" module should not depend on the "fluid", transform + // "framework::AttributeMap" to "phi::AttributeMap".
if (has_infer_varkernel_fn) { for (auto &attr : fluid_attrs) { switch (attr.second.index()) { - case 3: // string - (*phi_attrs)[attr.first] = paddle::get<3>(attr.second); + case 3: // string type in framework::Attribute + (*phi_attrs)[attr.first] = PADDLE_GET_CONST(std::string, attr.second); break; default: + VLOG(6) << "GetKernelTypeForVarContext currently only use " + "std::string. You add other type if need."; break; } } } - return phi::InferVarKernelContext(&kernel_key, phi_attrs); + return phi::GetKernelTypeForVarContext(&kernel_key, phi_attrs); } } // namespace framework diff --git a/paddle/fluid/framework/data_transform.h b/paddle/fluid/framework/data_transform.h index b113bd4d1118e..60491aa9fe489 100644 --- a/paddle/fluid/framework/data_transform.h +++ b/paddle/fluid/framework/data_transform.h @@ -25,7 +25,7 @@ limitations under the License. */ #include "paddle/fluid/platform/device_context.h" #include "paddle/fluid/platform/macros.h" #include "paddle/phi/common/transform.h" -#include "paddle/phi/core/infer_varkernel_utils.h" +#include "paddle/phi/core/compat/get_kerneltype_forvar_utils.h" #include "paddle/phi/kernels/funcs/math_function.h" namespace paddle { @@ -47,7 +47,7 @@ void SetTensorToVariable(const Variable &in_var, const phi::DenseTensor &tensor, Variable *out_var); -phi::InferVarKernelContext BuildInferVarKernelContext( +phi::GetKernelTypeForVarContext BuildGetKernelTypeForVarContext( const phi::KernelKey &kernel_key, const AttributeMap &fluid_attrs, phi::AttributeMap *phi_attrs, diff --git a/paddle/fluid/framework/new_executor/interpreter/data_transfer.cc b/paddle/fluid/framework/new_executor/interpreter/data_transfer.cc index 4b2a87068862a..a6af40ae4dbc4 100644 --- a/paddle/fluid/framework/new_executor/interpreter/data_transfer.cc +++ b/paddle/fluid/framework/new_executor/interpreter/data_transfer.cc @@ -476,17 +476,15 @@ void ApplyDataTransform(const OpKernelType& expected_kernel_key, bool transfered = false; DataTranferHelper
data_transfer_helper(place, var_scope, local_scope); phi::Kernel* phi_kernel = op_func_node->phi_kernel_; - auto has_infer_varkernel_fn = (phi_kernel && phi_kernel->IsValid() && - phi_kernel->GetKernelRegisteredType() == - phi::KernelRegisteredType::FUNCTION && - phi_kernel->infer_var_kernel_fn_ != nullptr); + auto has_infer_varkernel_fn = + (phi_kernel && phi_kernel->infer_var_kernel_fn_ != nullptr); phi::AttributeMap infer_attrs{}; auto fluid_attrs = static_cast(op_base)->Attrs(); auto phi_kernelkey = framework::TransOpKernelTypeToPhiKernelKey(expected_kernel_key); - phi::InferVarKernelContext infer_varkernel_context = - BuildInferVarKernelContext( + phi::GetKernelTypeForVarContext infer_varkernel_context = + BuildGetKernelTypeForVarContext( phi_kernelkey, fluid_attrs, &infer_attrs, has_infer_varkernel_fn); auto apply_data_transform_for_one_parameter = [&](const std::string& parameter_name, diff --git a/paddle/fluid/framework/operator.cc b/paddle/fluid/framework/operator.cc index 5dca0d192fd34..88861c567a7da 100644 --- a/paddle/fluid/framework/operator.cc +++ b/paddle/fluid/framework/operator.cc @@ -38,8 +38,8 @@ limitations under the License. 
*/ #include "paddle/fluid/platform/profiler/supplement_tracing.h" #include "paddle/phi/common/int_array.h" #include "paddle/phi/common/scalar.h" +#include "paddle/phi/core/compat/get_kerneltype_forvar_utils.h" #include "paddle/phi/core/ddim.h" -#include "paddle/phi/core/infer_varkernel_utils.h" #include "paddle/phi/core/kernel_context.h" #include "paddle/phi/core/kernel_factory.h" #include "paddle/phi/ops/compat/signatures.h" @@ -2448,17 +2448,15 @@ Scope* OperatorWithKernel::PrepareData( } } - auto has_infer_varkernel_fn = (run_phi_kernel_ && - phi_kernel_->GetKernelRegisteredType() == - phi::KernelRegisteredType::FUNCTION && - phi_kernel_->infer_var_kernel_fn_ != nullptr); + auto has_infer_varkernel_fn = + (run_phi_kernel_ && phi_kernel_->infer_var_kernel_fn_ != nullptr); phi::AttributeMap infer_attrs{}; auto fluid_attrs = Attrs(); - phi::InferVarKernelContext infer_varkernel_context = - BuildInferVarKernelContext(expected_kernel_key, - fluid_attrs, - &infer_attrs, - has_infer_varkernel_fn); + phi::GetKernelTypeForVarContext infer_varkernel_context = + BuildGetKernelTypeForVarContext(expected_kernel_key, + fluid_attrs, + &infer_attrs, + has_infer_varkernel_fn); const auto& name_map = Inputs(); auto prepare_input_data = [&](const std::string& in_name, diff --git a/paddle/phi/api/yaml/op_compat.yaml b/paddle/phi/api/yaml/op_compat.yaml index a40734347d0fb..c466e828bf12a 100644 --- a/paddle/phi/api/yaml/op_compat.yaml +++ b/paddle/phi/api/yaml/op_compat.yaml @@ -163,10 +163,6 @@ - op : bicubic_interp (bicubic_interp_v2) backward : bicubic_interp_grad (bicubic_interp_v2_grad) - inputs : - {x : X, out_size : OutSize, size_tensor : SizeTensor, scale_tensor : Scale} - outputs : - output : Out inputs : {x : X, out_size : OutSize, size_tensor : SizeTensor, scale_tensor : Scale} outputs : @@ -176,10 +172,6 @@ - op : bilinear_interp (bilinear_interp_v2) backward : bilinear_interp_grad (bilinear_interp_v2_grad) - inputs : - {x : X, out_size : OutSize, size_tensor : 
SizeTensor, scale_tensor : Scale} - outputs : - output : Out inputs : {x : X, out_size : OutSize, size_tensor : SizeTensor, scale_tensor : Scale} outputs : diff --git a/paddle/phi/core/CMakeLists.txt b/paddle/phi/core/CMakeLists.txt index 239e61257e60a..efd21dffd64f9 100644 --- a/paddle/phi/core/CMakeLists.txt +++ b/paddle/phi/core/CMakeLists.txt @@ -102,8 +102,6 @@ cc_library( SRCS infermeta_utils.cc DEPS meta_tensor) -cc_library(infer_varkernel_utils SRCS infer_varkernel_utils.cc) - cc_library( selected_rows SRCS selected_rows_impl.cc selected_rows.cc diff --git a/paddle/phi/core/compat/CMakeLists.txt b/paddle/phi/core/compat/CMakeLists.txt index 3d76cb1112c7c..3051ae4989222 100644 --- a/paddle/phi/core/compat/CMakeLists.txt +++ b/paddle/phi/core/compat/CMakeLists.txt @@ -6,6 +6,7 @@ cc_library( op_utils SRCS op_utils.cc DEPS arg_map_context enforce) +cc_library(get_kerneltype_forvar_utils SRCS get_kerneltype_forvar_utils.cc) set(convert_utils_deps data_type place op_utils phi_backends) diff --git a/paddle/phi/core/infer_varkernel_utils.cc b/paddle/phi/core/compat/get_kerneltype_forvar_utils.cc similarity index 57% rename from paddle/phi/core/infer_varkernel_utils.cc rename to paddle/phi/core/compat/get_kerneltype_forvar_utils.cc index ffb467bbd2129..abb483b97240d 100644 --- a/paddle/phi/core/infer_varkernel_utils.cc +++ b/paddle/phi/core/compat/get_kerneltype_forvar_utils.cc @@ -1,4 +1,4 @@ -// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. +// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,31 +12,31 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#include "paddle/phi/core/infer_varkernel_utils.h" +#include "paddle/phi/core/compat/get_kerneltype_forvar_utils.h" namespace phi { -const std::string& InferVarKernelContext::GetVarName(void) const { +const std::string& GetKernelTypeForVarContext::GetVarName(void) const { return *var_name_; } -const DenseTensor& InferVarKernelContext::GetTensor(void) const { +const DenseTensor& GetKernelTypeForVarContext::GetTensor(void) const { return *tensor_; } -const KernelKey& InferVarKernelContext::GetKernelKey(void) const { +const KernelKey& GetKernelTypeForVarContext::GetKernelKey(void) const { return *kernel_key_; } -const AttributeMap& InferVarKernelContext::GetAttrs(void) const { +const AttributeMap& GetKernelTypeForVarContext::GetAttrs(void) const { return *attrs_; } -void InferVarKernelContext::SetVarName(std::string* var_name) { +void GetKernelTypeForVarContext::SetVarName(std::string* var_name) { this->var_name_ = var_name; } -void InferVarKernelContext::SetDenseTensor(DenseTensor* tensor) { +void GetKernelTypeForVarContext::SetDenseTensor(DenseTensor* tensor) { this->tensor_ = tensor; } diff --git a/paddle/phi/core/infer_varkernel_utils.h b/paddle/phi/core/compat/get_kerneltype_forvar_utils.h similarity index 54% rename from paddle/phi/core/infer_varkernel_utils.h rename to paddle/phi/core/compat/get_kerneltype_forvar_utils.h index af58886b07be5..61ff7b53a1d91 100644 --- a/paddle/phi/core/infer_varkernel_utils.h +++ b/paddle/phi/core/compat/get_kerneltype_forvar_utils.h @@ -1,10 +1,10 @@ -// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. +// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -15,22 +15,22 @@ #pragma once #include "paddle/phi/core/attribute.h" -#include "paddle/phi/core/enforce.h" namespace phi { class KernelKey; class DenseTensor; /** - * Note: InferVarKernelContext is only designed to MKLDNN kernel when the - * related memeber function 'GetKernelTypeFor' is special. + * Note: GetKernelTypeForVarContext is currently designed for MKLDNN kernels when + * the related member function 'GetKernelTypeForVar' is special. It is + * possible to use it for other custom hardware in the future. */ -class InferVarKernelContext { +class GetKernelTypeForVarContext { public: - InferVarKernelContext() = default; - InferVarKernelContext(const InferVarKernelContext&) = default; - explicit InferVarKernelContext(const phi::KernelKey* kernel_key, - const AttributeMap* attrs) + GetKernelTypeForVarContext() = default; + GetKernelTypeForVarContext(const GetKernelTypeForVarContext&) = default; + explicit GetKernelTypeForVarContext(const phi::KernelKey* kernel_key, + const AttributeMap* attrs) : kernel_key_(kernel_key), attrs_(attrs) {} const std::string& GetVarName(void) const; @@ -46,13 +46,13 @@ class InferVarKernelContext { void SetDenseTensor(DenseTensor* tensor); private: - const KernelKey* kernel_key_; + const KernelKey* kernel_key_; // not owned // Use AttributeMap in namespace 'phi' to avoid depending 'fuild' - const AttributeMap* attrs_; - std::string* var_name_; - DenseTensor* tensor_; + const AttributeMap* attrs_; // not owned + std::string* var_name_; // not owned + DenseTensor* tensor_; // not owned }; -typedef KernelKey (*InferVarKernelFn)(const InferVarKernelContext*); +typedef KernelKey (*GetKernelTypeForVarFn)(const GetKernelTypeForVarContext*); } // namespace phi
diff --git a/paddle/phi/core/kernel_factory.h b/paddle/phi/core/kernel_factory.h index 2587aff0694a9..866c93030bb1d 100644 --- a/paddle/phi/core/kernel_factory.h +++ b/paddle/phi/core/kernel_factory.h @@ -24,8 +24,8 @@ #include "paddle/phi/common/data_type.h" #include "paddle/phi/common/layout.h" #include "paddle/phi/core/compat/convert_utils.h" +#include "paddle/phi/core/compat/get_kerneltype_forvar_utils.h" #include "paddle/phi/core/enforce.h" -#include "paddle/phi/core/infer_varkernel_utils.h" #include "paddle/phi/core/type_defs.h" #include "paddle/phi/core/utils/data_type.h" #include "paddle/utils/flat_hash_map.h" @@ -286,7 +286,7 @@ class Kernel { return kernel_registered_type_; } - InferVarKernelFn infer_var_kernel_fn_{nullptr}; + GetKernelTypeForVarFn infer_var_kernel_fn_{nullptr}; private: KernelFn fn_{nullptr}; diff --git a/paddle/phi/kernels/CMakeLists.txt b/paddle/phi/kernels/CMakeLists.txt index da5eb0569b190..335c77300fead 100644 --- a/paddle/phi/kernels/CMakeLists.txt +++ b/paddle/phi/kernels/CMakeLists.txt @@ -151,7 +151,8 @@ file(GLOB kernel_xpu "xpu/*.cc" "selected_rows/xpu/*.cc" "fusion/xpu/*.cc") add_library(phi_cpu ${kernel_cc}) kernel_declare("${kernel_cc}") if(WITH_MKLDNN) - target_link_libraries(phi_cpu ${COMMON_KERNEL_DEPS} infer_varkernel_utils) + target_link_libraries(phi_cpu ${COMMON_KERNEL_DEPS} + get_kerneltype_forvar_utils) else() target_link_libraries(phi_cpu ${COMMON_KERNEL_DEPS}) endif() diff --git a/paddle/phi/kernels/onednn/interpolate_kernel.cc b/paddle/phi/kernels/onednn/interpolate_kernel.cc index ae68d051a1a30..bdb9e37587a40 100644 --- a/paddle/phi/kernels/onednn/interpolate_kernel.cc +++ b/paddle/phi/kernels/onednn/interpolate_kernel.cc @@ -15,14 +15,14 @@ #include "paddle/phi/kernels/interpolate_kernel.h" #include "paddle/phi/backends/onednn/onednn_reuse.h" -#include "paddle/phi/core/infer_varkernel_utils.h" +#include "paddle/phi/core/compat/get_kerneltype_forvar_utils.h" #include "paddle/phi/core/kernel_registry.h" #include 
"paddle/phi/kernels/funcs/interpolate_function.h" namespace phi { phi::KernelKey InterpolateGetKernelTypeForVar( - const InferVarKernelContext* ctx) { + const GetKernelTypeForVarContext* ctx) { const std::string& var_name = ctx->GetVarName(); const DenseTensor& tensor = ctx->GetTensor(); const KernelKey& expected_kernel_type = ctx->GetKernelKey(); @@ -32,10 +32,10 @@ phi::KernelKey InterpolateGetKernelTypeForVar( if ((expected_kernel_type.layout() == phi::DataLayout::ONEDNN) && (tensor.layout() != phi::DataLayout::ONEDNN)) { auto it = attrs.find("data_layout"); - PADDLE_ENFORCE_NE(it, - attrs.end(), - paddle::platform::errors::NotFound( - "Cannot find attribute %s.", "data_layout")); + PADDLE_ENFORCE_NE( + it, + attrs.end(), + phi::errors::NotFound("Cannot find attribute %s.", "data_layout")); const std::string data_layout = PADDLE_GET_CONST(std::string, it->second); auto dl = phi::StringToDataLayout(data_layout); // Some models may have intentionally set "AnyLayout" for pool From 4a77a395cfdb2ddc90223b8174df9bced2cacd33 Mon Sep 17 00:00:00 2001 From: lzydev <1528794076@qq.com> Date: Mon, 6 Mar 2023 11:10:35 +0000 Subject: [PATCH 06/12] change the code according to first review --- paddle/phi/core/kernel_factory.h | 2 +- paddle/phi/kernels/onednn/interpolate_kernel.cc | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/paddle/phi/core/kernel_factory.h b/paddle/phi/core/kernel_factory.h index 866c93030bb1d..8c954b7d06564 100644 --- a/paddle/phi/core/kernel_factory.h +++ b/paddle/phi/core/kernel_factory.h @@ -286,7 +286,7 @@ class Kernel { return kernel_registered_type_; } - GetKernelTypeForVarFn infer_var_kernel_fn_{nullptr}; + GetKernelTypeForVarFn get_kerneltype_forvar_fn_{nullptr}; private: KernelFn fn_{nullptr}; diff --git a/paddle/phi/kernels/onednn/interpolate_kernel.cc b/paddle/phi/kernels/onednn/interpolate_kernel.cc index bdb9e37587a40..25ee825b086fc 100644 --- a/paddle/phi/kernels/onednn/interpolate_kernel.cc +++ 
b/paddle/phi/kernels/onednn/interpolate_kernel.cc @@ -267,7 +267,7 @@ PD_REGISTER_KERNEL(bilinear_interp, phi::BilinearInterpKernel, float, phi::dtype::bfloat16) { - kernel->infer_var_kernel_fn_ = phi::InterpolateGetKernelTypeForVar; + kernel->get_kerneltype_forvar_fn_ = phi::InterpolateGetKernelTypeForVar; } PD_REGISTER_KERNEL(nearest_interp, @@ -278,5 +278,5 @@ PD_REGISTER_KERNEL(nearest_interp, phi::dtype::bfloat16, int8_t, uint8_t) { - kernel->infer_var_kernel_fn_ = phi::InterpolateGetKernelTypeForVar; + kernel->get_kerneltype_forvar_fn_ = phi::InterpolateGetKernelTypeForVar; } From 2f670e195dde60b4cb0d513c2cc6b38f7e54ea27 Mon Sep 17 00:00:00 2001 From: lzydev <1528794076@qq.com> Date: Mon, 6 Mar 2023 12:27:58 +0000 Subject: [PATCH 07/12] change the mode of paddle_build.sh --- paddle/scripts/paddle_build.sh | 0 1 file changed, 0 insertions(+), 0 deletions(-) mode change 100755 => 100644 paddle/scripts/paddle_build.sh diff --git a/paddle/scripts/paddle_build.sh b/paddle/scripts/paddle_build.sh old mode 100755 new mode 100644 From f66c2d60dd991ecd1fc8eada5c73e4b4c2d7ec70 Mon Sep 17 00:00:00 2001 From: lzydev <1528794076@qq.com> Date: Mon, 6 Mar 2023 13:02:44 +0000 Subject: [PATCH 08/12] change 'infer_var_kernel_fn_' to 'get_kerneltype_forvar_fn_' --- .../framework/new_executor/interpreter/data_transfer.cc | 6 +++--- paddle/fluid/framework/operator.cc | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/paddle/fluid/framework/new_executor/interpreter/data_transfer.cc b/paddle/fluid/framework/new_executor/interpreter/data_transfer.cc index fc2150d986696..52cc26d54f13d 100644 --- a/paddle/fluid/framework/new_executor/interpreter/data_transfer.cc +++ b/paddle/fluid/framework/new_executor/interpreter/data_transfer.cc @@ -477,7 +477,7 @@ void ApplyDataTransform(const OpKernelType& expected_kernel_key, DataTranferHelper data_transfer_helper(place, var_scope, local_scope); phi::Kernel* phi_kernel = op_func_node->phi_kernel_; auto has_infer_varkernel_fn 
= - (phi_kernel && phi_kernel->infer_var_kernel_fn_ != nullptr); + (phi_kernel && phi_kernel->get_kerneltype_forvar_fn_ != nullptr); phi::AttributeMap infer_attrs{}; auto fluid_attrs = static_cast(op_base)->Attrs(); @@ -568,8 +568,8 @@ void ApplyDataTransform(const OpKernelType& expected_kernel_key, const_cast(¶meter_name)); infer_varkernel_context.SetDenseTensor( const_cast(tensor_in)); - kernel_key_for_var = - phi_kernel->infer_var_kernel_fn_(&infer_varkernel_context); + kernel_key_for_var = phi_kernel->get_kerneltype_forvar_fn_( + &infer_varkernel_context); } std::unique_ptr expected_kernel_key_for_argument_def = nullptr; diff --git a/paddle/fluid/framework/operator.cc b/paddle/fluid/framework/operator.cc index de9a40cd069b5..4869e54ac687c 100644 --- a/paddle/fluid/framework/operator.cc +++ b/paddle/fluid/framework/operator.cc @@ -2450,7 +2450,7 @@ Scope* OperatorWithKernel::PrepareData( } auto has_infer_varkernel_fn = - (run_phi_kernel_ && phi_kernel_->infer_var_kernel_fn_ != nullptr); + (run_phi_kernel_ && phi_kernel_->get_kerneltype_forvar_fn_ != nullptr); phi::AttributeMap infer_attrs{}; auto fluid_attrs = Attrs(); phi::GetKernelTypeForVarContext infer_varkernel_context = @@ -2526,7 +2526,7 @@ Scope* OperatorWithKernel::PrepareData( infer_varkernel_context.SetDenseTensor( const_cast(tensor_in)); kernel_type_for_var = - phi_kernel_->infer_var_kernel_fn_(&infer_varkernel_context); + phi_kernel_->get_kerneltype_forvar_fn_(&infer_varkernel_context); } bool need_trans_dtype = NeedTransformDataType(expected_kernel_key, kernel_type_for_var); From 1b5b3d90206c54731a16d4f2b7db739f80e516ae Mon Sep 17 00:00:00 2001 From: lzydev <1528794076@qq.com> Date: Mon, 6 Mar 2023 14:42:04 +0000 Subject: [PATCH 09/12] add the error information --- paddle/phi/kernels/onednn/interpolate_kernel.cc | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/paddle/phi/kernels/onednn/interpolate_kernel.cc b/paddle/phi/kernels/onednn/interpolate_kernel.cc index 
1ca2b5b5f4bec..0e48e27c6a260 100644 --- a/paddle/phi/kernels/onednn/interpolate_kernel.cc +++ b/paddle/phi/kernels/onednn/interpolate_kernel.cc @@ -35,7 +35,8 @@ phi::KernelKey InterpolateGetKernelTypeForVar( PADDLE_ENFORCE_NE( it, attrs.end(), - phi::errors::NotFound("Cannot find attribute %s.", "data_layout")); + phi::errors::NotFound("Cannot find attribute data_layout, please check " + "the AttributeMap")); const std::string data_layout = PADDLE_GET_CONST(std::string, it->second); auto dl = phi::StringToDataLayout(data_layout); // Some models may have intentionally set "AnyLayout" for pool From 42ea9cd59321c0a8b1bca6023284baf2a87b9de5 Mon Sep 17 00:00:00 2001 From: lzydev <1528794076@qq.com> Date: Wed, 8 Mar 2023 08:40:33 +0000 Subject: [PATCH 10/12] fix NotFound infomation warning --- .../phi/kernels/onednn/interpolate_kernel.cc | 23 +++++++++---------- 1 file changed, 11 insertions(+), 12 deletions(-) diff --git a/paddle/phi/kernels/onednn/interpolate_kernel.cc b/paddle/phi/kernels/onednn/interpolate_kernel.cc index 0e48e27c6a260..5abc26fba2f89 100644 --- a/paddle/phi/kernels/onednn/interpolate_kernel.cc +++ b/paddle/phi/kernels/onednn/interpolate_kernel.cc @@ -21,7 +21,7 @@ namespace phi { -phi::KernelKey InterpolateGetKernelTypeForVar( +KernelKey InterpolateGetKernelTypeForVar( const GetKernelTypeForVarContext* ctx) { const std::string& var_name = ctx->GetVarName(); const DenseTensor& tensor = ctx->GetTensor(); @@ -29,29 +29,28 @@ phi::KernelKey InterpolateGetKernelTypeForVar( const AttributeMap& attrs = ctx->GetAttrs(); // Only input require reshaping, weights and // bias are having shape in NCHW order - if ((expected_kernel_type.layout() == phi::DataLayout::ONEDNN) && - (tensor.layout() != phi::DataLayout::ONEDNN)) { + if ((expected_kernel_type.layout() == DataLayout::ONEDNN) && + (tensor.layout() != DataLayout::ONEDNN)) { auto it = attrs.find("data_layout"); PADDLE_ENFORCE_NE( it, attrs.end(), - phi::errors::NotFound("Cannot find attribute data_layout, 
please check " - "the AttributeMap")); + errors::NotFound("Attr(data_layout) of InterpolateOp is not found.")); const std::string data_layout = PADDLE_GET_CONST(std::string, it->second); - auto dl = phi::StringToDataLayout(data_layout); + auto dl = StringToDataLayout(data_layout); // Some models may have intentionally set "AnyLayout" for pool // op. Treat this as NCHW (default data_format value) - if (dl != phi::DataLayout::kAnyLayout) { - return phi::KernelKey(tensor.place(), dl, expected_kernel_type.dtype()); + if (dl != DataLayout::kAnyLayout) { + return KernelKey(tensor.place(), dl, expected_kernel_type.dtype()); } } if (var_name == "OutSize" || var_name == "SizeTensor" || var_name == "Scale") { - return phi::KernelKey(phi::Backend::ALL_BACKEND, - expected_kernel_type.layout(), - expected_kernel_type.dtype()); + return KernelKey(Backend::ALL_BACKEND, + expected_kernel_type.layout(), + expected_kernel_type.dtype()); } - return phi::KernelKey( + return KernelKey( tensor.place(), tensor.layout(), expected_kernel_type.dtype()); } From 342664485a7bb7ba7a9f1155ec01acb928143c39 Mon Sep 17 00:00:00 2001 From: lzydev <1528794076@qq.com> Date: Wed, 8 Mar 2023 08:42:47 +0000 Subject: [PATCH 11/12] fix NotFound infomation warning --- paddle/phi/kernels/onednn/interpolate_kernel.cc | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/paddle/phi/kernels/onednn/interpolate_kernel.cc b/paddle/phi/kernels/onednn/interpolate_kernel.cc index 5abc26fba2f89..0b2939e324d22 100644 --- a/paddle/phi/kernels/onednn/interpolate_kernel.cc +++ b/paddle/phi/kernels/onednn/interpolate_kernel.cc @@ -35,7 +35,8 @@ KernelKey InterpolateGetKernelTypeForVar( PADDLE_ENFORCE_NE( it, attrs.end(), - errors::NotFound("Attr(data_layout) of InterpolateOp is not found.")); + errors::NotFound( + "Attribute(data_layout) of InterpolateOp is not found.")); const std::string data_layout = PADDLE_GET_CONST(std::string, it->second); auto dl = StringToDataLayout(data_layout); // Some models may 
have intentionally set "AnyLayout" for pool From b305cd5c5e1aa7e0e25441f2d059eec82f8bf943 Mon Sep 17 00:00:00 2001 From: lzydev <1528794076@qq.com> Date: Wed, 8 Mar 2023 08:57:39 +0000 Subject: [PATCH 12/12] fix NotFound infomation warning --- paddle/phi/kernels/onednn/interpolate_kernel.cc | 5 ----- 1 file changed, 5 deletions(-) diff --git a/paddle/phi/kernels/onednn/interpolate_kernel.cc b/paddle/phi/kernels/onednn/interpolate_kernel.cc index 0b2939e324d22..b9a8672d792ce 100644 --- a/paddle/phi/kernels/onednn/interpolate_kernel.cc +++ b/paddle/phi/kernels/onednn/interpolate_kernel.cc @@ -32,11 +32,6 @@ KernelKey InterpolateGetKernelTypeForVar( if ((expected_kernel_type.layout() == DataLayout::ONEDNN) && (tensor.layout() != DataLayout::ONEDNN)) { auto it = attrs.find("data_layout"); - PADDLE_ENFORCE_NE( - it, - attrs.end(), - errors::NotFound( - "Attribute(data_layout) of InterpolateOp is not found.")); const std::string data_layout = PADDLE_GET_CONST(std::string, it->second); auto dl = StringToDataLayout(data_layout); // Some models may have intentionally set "AnyLayout" for pool