
[Pten] move operators/math/math_function_* to pten/kernels/funcs #39300

Merged
merged 4 commits on Feb 11, 2022
@@ -35,12 +35,12 @@ limitations under the License. */
#include "paddle/fluid/framework/variable.h"
#include "paddle/fluid/framework/variable_helper.h"
#include "paddle/fluid/operators/math/blas.h"
#include "paddle/fluid/operators/math/math_function.h"
#include "paddle/fluid/operators/math/selected_rows_functor.h"
#include "paddle/fluid/platform/device_context.h"
#include "paddle/fluid/platform/enforce.h"
#include "paddle/fluid/platform/place.h"
#include "paddle/fluid/string/split.h"
#include "paddle/pten/kernels/funcs/math_function.h"

#include "paddle/fluid/distributed/ps/service/ps_client.h"

@@ -180,7 +180,7 @@ inline void MergeVars(const std::string &var_name,

// set output tensor to 0.
paddle::platform::CPUDeviceContext cpu_ctx;
paddle::operators::math::SetConstant<paddle::platform::CPUDeviceContext, T>
pten::funcs::SetConstant<paddle::platform::CPUDeviceContext, T>
constant_functor;
constant_functor(cpu_ctx, out_t, static_cast<T>(0));
// sum all vars to out
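
The hunk above is representative of most call-site changes in this PR: the SetConstant functor keeps its template parameters and call signature, and only its namespace and header move from paddle::operators::math to pten::funcs. A minimal sketch of the post-migration pattern follows; the function name, tensor shape, and dtype are illustrative and not taken from this diff.

#include "paddle/fluid/framework/ddim.h"
#include "paddle/fluid/framework/tensor.h"
#include "paddle/fluid/platform/device_context.h"
#include "paddle/fluid/platform/place.h"
#include "paddle/pten/kernels/funcs/math_function.h"

// Zero-fill a CPU tensor with the relocated functor; only the namespace
// differs from the old paddle::operators::math::SetConstant.
void ZeroFill(paddle::framework::Tensor* out_t) {
  paddle::platform::CPUDeviceContext cpu_ctx;
  out_t->Resize(paddle::framework::make_ddim({4, 8}));        // illustrative shape
  out_t->mutable_data<float>(paddle::platform::CPUPlace());   // storage must exist before filling
  pten::funcs::SetConstant<paddle::platform::CPUDeviceContext, float> constant_functor;
  constant_functor(cpu_ctx, out_t, static_cast<float>(0));
}
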
@@ -38,9 +38,10 @@
#include "paddle/fluid/distributed/ps/service/ps_service/service.h"
#include "paddle/fluid/distributed/ps/service/sendrecv.pb.h"
#include "paddle/fluid/framework/program_desc.h"
#include "paddle/fluid/operators/math/math_function.h"
#include "paddle/fluid/platform/place.h"
#include "paddle/fluid/string/printf.h"
#include "paddle/pten/kernels/funcs/math_function.h"

namespace paddle {
namespace distributed {
class GraphPyService {
3 changes: 1 addition & 2 deletions paddle/fluid/distributed/test/brpc_service_dense_sgd_test.cc
@@ -21,8 +21,8 @@ limitations under the License. */
#include "paddle/fluid/distributed/ps/service/brpc_ps_server.h"
#include "paddle/fluid/framework/program_desc.h"
#include "paddle/fluid/framework/scope.h"
#include "paddle/fluid/operators/math/math_function.h"
#include "paddle/fluid/platform/place.h"
#include "paddle/pten/kernels/funcs/math_function.h"

namespace paddle {
namespace distributed {
@@ -42,7 +42,6 @@ class DenseTensor;
namespace framework = paddle::framework;
namespace platform = paddle::platform;
namespace operators = paddle::operators;
namespace math = paddle::operators::math;
namespace memory = paddle::memory;
namespace distributed = paddle::distributed;

@@ -22,8 +22,8 @@ limitations under the License. */
#include "paddle/fluid/distributed/ps/service/brpc_ps_server.h"
#include "paddle/fluid/distributed/ps/service/env.h"
#include "paddle/fluid/framework/program_desc.h"
#include "paddle/fluid/operators/math/math_function.h"
#include "paddle/fluid/platform/place.h"
#include "paddle/pten/kernels/funcs/math_function.h"

namespace paddle {
namespace distributed {
@@ -43,7 +43,6 @@ class DenseTensor;
namespace framework = paddle::framework;
namespace platform = paddle::platform;
namespace operators = paddle::operators;
namespace math = paddle::operators::math;
namespace memory = paddle::memory;
namespace distributed = paddle::distributed;

9 changes: 4 additions & 5 deletions paddle/fluid/distributed/test/brpc_utils_test.cc
@@ -17,7 +17,7 @@ limitations under the License. */
#include "gtest/gtest.h"

#include "paddle/fluid/distributed/ps/service/brpc_utils.h"
#include "paddle/fluid/operators/math/math_function.h"
#include "paddle/pten/kernels/funcs/math_function.h"

namespace paddle {
namespace framework {
@@ -28,7 +28,6 @@ class Variable;
namespace framework = paddle::framework;
namespace platform = paddle::platform;
namespace operators = paddle::operators;
namespace math = paddle::operators::math;
namespace memory = paddle::memory;
namespace distributed = paddle::distributed;

@@ -42,7 +41,7 @@ void CreateVarsOnScope(framework::Scope* scope, platform::Place* place,
lod1.push_back(framework::Vector<size_t>({1, 3, 8}));
tensor1->set_lod(lod1);
tensor1->mutable_data<float>(*place);
math::set_constant(ctx, tensor1, 31.9);
pten::funcs::set_constant(ctx, tensor1, 31.9);

// var 2
framework::Variable* var2 = scope->Var("x2");
@@ -52,7 +51,7 @@ void CreateVarsOnScope(framework::Scope* scope, platform::Place* place,
lod2.push_back(framework::Vector<size_t>({1, 1}));
tensor2->set_lod(lod2);
tensor2->mutable_data<int>(*place);
math::set_constant(ctx, tensor2, 100);
pten::funcs::set_constant(ctx, tensor2, 100);

// var 3
framework::Variable* var3 = scope->Var("x3");
@@ -62,7 +61,7 @@ void CreateVarsOnScope(framework::Scope* scope, platform::Place* place,
auto* rows = slr->mutable_rows();
tensor3->Resize(framework::make_ddim({564, 128}));
tensor3->mutable_data<float>(*place);
math::set_constant(ctx, tensor3, 32.7);
pten::funcs::set_constant(ctx, tensor3, 32.7);
for (int i = 0; i < 564; ++i) rows->push_back(i);
}
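
Besides the header swap, the test above drops the namespace math = paddle::operators::math; alias and calls the relocated free function through its full pten::funcs:: path. A small sketch of that pattern, assuming an already-constructed device context passed in by the caller; the function name, shape, and value are illustrative rather than code from this PR.

#include "paddle/fluid/framework/ddim.h"
#include "paddle/fluid/framework/lod_tensor.h"
#include "paddle/fluid/platform/device_context.h"
#include "paddle/pten/kernels/funcs/math_function.h"

// Allocate a small LoDTensor and fill it with a constant, mirroring how the
// updated tests call set_constant after the alias removal.
void FillVar(paddle::framework::LoDTensor* tensor,
             const paddle::platform::DeviceContext& ctx) {
  tensor->Resize(paddle::framework::make_ddim({2, 3}));  // illustrative shape
  tensor->mutable_data<float>(ctx.GetPlace());
  // Same arguments as the old math::set_constant; only the namespace moved.
  pten::funcs::set_constant(ctx, tensor, 31.9);
}
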

3 changes: 1 addition & 2 deletions paddle/fluid/distributed/test/graph_node_split_test.cc
@@ -36,14 +36,13 @@ limitations under the License. */
#include "paddle/fluid/framework/scope.h"
#include "paddle/fluid/framework/tensor_util.h"
#include "paddle/fluid/framework/variable.h"
#include "paddle/fluid/operators/math/math_function.h"
#include "paddle/fluid/platform/place.h"
#include "paddle/fluid/string/printf.h"
#include "paddle/pten/kernels/funcs/math_function.h"

namespace framework = paddle::framework;
namespace platform = paddle::platform;
namespace operators = paddle::operators;
namespace math = paddle::operators::math;
namespace memory = paddle::memory;
namespace distributed = paddle::distributed;

3 changes: 1 addition & 2 deletions paddle/fluid/distributed/test/graph_node_test.cc
@@ -36,14 +36,13 @@ limitations under the License. */
#include "paddle/fluid/framework/scope.h"
#include "paddle/fluid/framework/tensor_util.h"
#include "paddle/fluid/framework/variable.h"
#include "paddle/fluid/operators/math/math_function.h"
#include "paddle/fluid/platform/place.h"
#include "paddle/fluid/string/printf.h"
#include "paddle/pten/kernels/funcs/math_function.h"

namespace framework = paddle::framework;
namespace platform = paddle::platform;
namespace operators = paddle::operators;
namespace math = paddle::operators::math;
namespace memory = paddle::memory;
namespace distributed = paddle::distributed;

2 changes: 1 addition & 1 deletion paddle/fluid/eager/grad_tensor_holder.cc
@@ -16,7 +16,7 @@
#include "paddle/fluid/imperative/gradient_accumulator.h"

#include "paddle/fluid/framework/var_type.h"
#include "paddle/fluid/operators/math/math_function.h"
#include "paddle/pten/kernels/funcs/math_function.h"

namespace egr {

2 changes: 1 addition & 1 deletion paddle/fluid/framework/data_device_transform_test.cu
@@ -19,9 +19,9 @@ limitations under the License. */
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/scope.h"
#include "paddle/fluid/operators/elementwise/elementwise_op_function.h"
#include "paddle/fluid/operators/math/math_function.h"
#include "paddle/fluid/platform/device_context.h"
#include "paddle/fluid/platform/init.h"
#include "paddle/pten/kernels/funcs/math_function.h"

#include "paddle/fluid/framework/pten_utils.h"

4 changes: 2 additions & 2 deletions paddle/fluid/framework/data_layout_transform.cc
@@ -14,7 +14,7 @@

#include "paddle/fluid/framework/data_layout_transform.h"

#include "paddle/fluid/operators/math/math_function.h"
#include "paddle/pten/kernels/funcs/math_function.h"
#ifdef PADDLE_WITH_MKLDNN
#include "paddle/fluid/platform/mkldnn_reuse.h"
#endif
@@ -42,7 +42,7 @@ void CastDataLayout::apply() {
auto place = ctx_->GetPlace();

if (platform::is_cpu_place(place)) {
operators::math::Transpose<platform::CPUDeviceContext, T, 4> trans4;
pten::funcs::Transpose<platform::CPUDeviceContext, T, 4> trans4;
auto* context = static_cast<const platform::CPUDeviceContext*>(ctx_);
trans4(*context, in_, out_, axis_);
} else {
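
For context, pten::funcs::Transpose keeps the same three template parameters as the old operators::math::Transpose (device context, element type, tensor rank) and the same call shape seen in CastDataLayout::apply above. A rough sketch of a rank-4 NCHW-to-NHWC permutation on the CPU, under the assumption that the input tensor is already initialized; the function name and dtype are illustrative.

#include <vector>
#include "paddle/fluid/framework/ddim.h"
#include "paddle/fluid/framework/tensor.h"
#include "paddle/fluid/platform/device_context.h"
#include "paddle/pten/kernels/funcs/math_function.h"

// Permute a rank-4 float tensor from NCHW to NHWC with the relocated functor.
void TransposeToNHWC(const paddle::platform::CPUDeviceContext& ctx,
                     const paddle::framework::Tensor& in,  // assumed NCHW
                     paddle::framework::Tensor* out) {
  const auto& d = in.dims();
  out->Resize(paddle::framework::make_ddim({d[0], d[2], d[3], d[1]}));
  out->mutable_data<float>(ctx.GetPlace());
  // Output dimension i is taken from input dimension axis[i].
  std::vector<int> axis = {0, 2, 3, 1};
  pten::funcs::Transpose<paddle::platform::CPUDeviceContext, float, 4> trans4;
  trans4(ctx, in, out, axis);
}
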
2 changes: 1 addition & 1 deletion paddle/fluid/framework/data_transform.h
@@ -22,10 +22,10 @@ limitations under the License. */
#include "paddle/fluid/framework/selected_rows_utils.h"
#include "paddle/fluid/framework/tensor.h"
#include "paddle/fluid/framework/variable.h"
#include "paddle/fluid/operators/math/math_function.h"
#include "paddle/fluid/platform/device_context.h"
#include "paddle/fluid/platform/macros.h"
#include "paddle/fluid/platform/transform.h"
#include "paddle/pten/kernels/funcs/math_function.h"

namespace paddle {
namespace framework {
@@ -33,7 +33,7 @@
#include "paddle/fluid/framework/ir/node.h"
#include "paddle/fluid/framework/op_info.h"
#include "paddle/fluid/framework/scope.h"
#include "paddle/fluid/operators/math/math_function.h"
#include "paddle/pten/kernels/funcs/math_function.h"

#if defined(PADDLE_WITH_DGC)
#include "paddle/fluid/framework/details/sparse_all_reduce_op_handle.h"
6 changes: 3 additions & 3 deletions paddle/fluid/imperative/basic_engine.cc
@@ -28,8 +28,8 @@
#include "paddle/fluid/imperative/layer.h"
#include "paddle/fluid/imperative/op_base.h"
#include "paddle/fluid/imperative/tracer.h"
#include "paddle/fluid/operators/math/math_function.h"
#include "paddle/fluid/platform/profiler.h"
#include "paddle/pten/kernels/funcs/math_function.h"

DECLARE_bool(sort_sum_gradient);

@@ -103,7 +103,7 @@ void BasicEngine::Init(
if (grad_tensor == nullptr) {
grad_var->Resize(fwd_var.dims());
grad_var->mutable_data(fwd_var.place(), fwd_var.type());
operators::math::set_constant(*dev_ctx, grad_var, 1.0);
pten::funcs::set_constant(*dev_ctx, grad_var, 1.0);
} else {
paddle::framework::TensorCopy(
grad_tensor->Var().Get<framework::LoDTensor>(), fwd_var.place(),
@@ -156,7 +156,7 @@ void BasicEngine::CheckBackwardInputs(const OpBase& op) {
VLOG(6) << "Set ungenerated Grad: " << var->Name()
<< " as zero with dtype "
<< framework::DataTypeToString(var->ForwardDataType());
operators::math::set_constant(*dev_ctx, tensor, 0.0);
pten::funcs::set_constant(*dev_ctx, tensor, 0.0);
}
}
}
12 changes: 6 additions & 6 deletions paddle/fluid/imperative/gradient_accumulator.cc
@@ -22,12 +22,12 @@
#include "paddle/fluid/framework/selected_rows_utils.h"
#include "paddle/fluid/imperative/layer.h"
#include "paddle/fluid/operators/math/blas.h"
#include "paddle/fluid/operators/math/math_function.h"
#include "paddle/fluid/operators/math/selected_rows_functor.h"
#include "paddle/fluid/platform/complex.h"
#include "paddle/fluid/platform/device_context.h"
#include "paddle/fluid/platform/float16.h"
#include "paddle/fluid/platform/profiler.h"
#include "paddle/pten/kernels/funcs/math_function.h"
#ifdef PADDLE_WITH_XPU
#include "xpu/refactor/math.h"
#endif
@@ -210,7 +210,7 @@ void TensorAddImpl(const framework::Tensor& src, framework::Tensor* dst,
platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance();
paddle::platform::DeviceContext* ctx = pool.Get(place);
auto dev_ctx = dynamic_cast<DeviceContext*>(ctx);
operators::math::ElementwiseAddTo<DeviceContext, T> func;
pten::funcs::ElementwiseAddTo<DeviceContext, T> func;
func(dev_ctx, src, dst);
}
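
pten::funcs::ElementwiseAddTo also keeps the call shape visible in TensorAddImpl above: the functor receives the device context by pointer and accumulates src into dst in place. A minimal sketch of that pattern on the CPU, assuming both tensors are already allocated with matching shapes and that float16 is the element type (as in the accumulation path of this file); the function name is illustrative.

#include "paddle/fluid/framework/tensor.h"
#include "paddle/fluid/platform/device_context.h"
#include "paddle/fluid/platform/float16.h"
#include "paddle/pten/kernels/funcs/math_function.h"

// dst += src (element-wise), matching the call pattern used by TensorAddImpl
// after the migration.
void AddToFp16(const paddle::framework::Tensor& src,
               paddle::framework::Tensor* dst) {
  auto* ctx = paddle::platform::DeviceContextPool::Instance().Get(dst->place());
  auto* cpu_ctx = dynamic_cast<paddle::platform::CPUDeviceContext*>(ctx);
  pten::funcs::ElementwiseAddTo<paddle::platform::CPUDeviceContext,
                                paddle::platform::float16>
      add_to;
  add_to(cpu_ctx, src, dst);
}
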

@@ -703,12 +703,12 @@ void EagerGradientAccumulator::SumGrad(std::shared_ptr<VariableWrapper> var,
<< var->Var().Get<framework::LoDTensor>().dims();
tensor->Resize(var->Var().Get<framework::LoDTensor>().dims());
tensor->mutable_data(place, var->DataType());
operators::math::set_constant(*dev_ctx, tensor, 0.0);
pten::funcs::set_constant(*dev_ctx, tensor, 0.0);
} else {
auto* tensor =
dst_var->MutableVar()->GetMutable<framework::LoDTensor>();
tensor->mutable_data(place, var->DataType());
operators::math::set_constant(*dev_ctx, tensor, 0.0);
pten::funcs::set_constant(*dev_ctx, tensor, 0.0);
}
}
}
@@ -835,12 +835,12 @@ void SortedGradientAccumulator::SumGrad(std::shared_ptr<VariableWrapper> var,
<< var->Var().Get<framework::LoDTensor>().dims();
tensor->Resize(var->Var().Get<framework::LoDTensor>().dims());
tensor->mutable_data(place, var->DataType());
operators::math::set_constant(*dev_ctx, tensor, 0.0);
pten::funcs::set_constant(*dev_ctx, tensor, 0.0);
} else {
auto* tensor =
dst_var->MutableVar()->GetMutable<framework::LoDTensor>();
tensor->mutable_data(place, var->DataType());
operators::math::set_constant(*dev_ctx, tensor, 0.0);
pten::funcs::set_constant(*dev_ctx, tensor, 0.0);
}
}
// looks like tmp_grad_vars will not have any member but just in case
4 changes: 2 additions & 2 deletions paddle/fluid/imperative/layer.cc
@@ -20,10 +20,10 @@
#include "paddle/fluid/imperative/op_base.h"
#include "paddle/fluid/imperative/prepared_operator.h"
#include "paddle/fluid/imperative/var_helper.h"
#include "paddle/fluid/operators/math/math_function.h"
#include "paddle/fluid/platform/device_context.h"
#include "paddle/fluid/platform/enforce.h"
#include "paddle/fluid/platform/profiler.h"
#include "paddle/pten/kernels/funcs/math_function.h"
#ifdef PADDLE_WITH_MKLDNN
#include "paddle/fluid/platform/mkldnn_helper.h"
#endif
@@ -229,7 +229,7 @@ void VarBase::ClearGradient(bool set_to_zero) {
if (set_to_zero) {
auto* dev_ctx =
platform::DeviceContextPool::Instance().Get(grad_t->place());
operators::math::set_constant(*dev_ctx, grad_t, 0.0);
pten::funcs::set_constant(*dev_ctx, grad_t, 0.0);
} else {
grad_t->clear();
}
4 changes: 2 additions & 2 deletions paddle/fluid/imperative/partial_grad_engine.cc
@@ -28,10 +28,10 @@
#include "paddle/fluid/imperative/layer.h"
#include "paddle/fluid/imperative/op_base.h"
#include "paddle/fluid/imperative/tracer.h"
#include "paddle/fluid/operators/math/math_function.h"
#include "paddle/fluid/platform/device_context.h"
#include "paddle/fluid/platform/profiler.h"
#include "paddle/fluid/string/string_helper.h"
#include "paddle/pten/kernels/funcs/math_function.h"

DECLARE_bool(sort_sum_gradient);

@@ -316,7 +316,7 @@ static void FillConstantLike(const VariableWrapper &ref_var,
} else {
dst_tensor->mutable_data(place, ref_var.DataType());
}
operators::math::set_constant(*dev_ctx, dst_tensor, value);
pten::funcs::set_constant(*dev_ctx, dst_tensor, value);
}

/**
2 changes: 1 addition & 1 deletion paddle/fluid/imperative/reducer.cc
@@ -755,7 +755,7 @@ void Reducer::MarkVarReady(const size_t var_index, const bool is_used_var) {
{static_cast<int64_t>(length)});
} else {
group_tensor.Resize({static_cast<int64_t>(length)});
operators::math::set_constant(*dev_ctx, &group_tensor, 0.0);
pten::funcs::set_constant(*dev_ctx, &group_tensor, 0.0);
}
#endif
}
2 changes: 1 addition & 1 deletion paddle/fluid/imperative/reducer.h
@@ -29,8 +29,8 @@
#include "paddle/fluid/framework/tensor.h"
#include "paddle/fluid/framework/tensor_util.h"
#include "paddle/fluid/framework/variable.h"
#include "paddle/fluid/operators/math/math_function.h"
#include "paddle/fluid/platform/for_range.h"
#include "paddle/pten/kernels/funcs/math_function.h"

namespace paddle {
namespace imperative {
2 changes: 1 addition & 1 deletion paddle/fluid/imperative/tests/test_gradient_accmulator.cc
@@ -20,7 +20,7 @@
#include "paddle/fluid/framework/variable.h"
#include "paddle/fluid/imperative/gradient_accumulator.h"
#include "paddle/fluid/memory/memcpy.h"
#include "paddle/fluid/operators/math/math_function.h"
#include "paddle/pten/kernels/funcs/math_function.h"

namespace imperative = paddle::imperative;
namespace platform = paddle::platform;
2 changes: 1 addition & 1 deletion paddle/fluid/operators/addmm_op.h
@@ -20,7 +20,7 @@ limitations under the License. */
#include "paddle/fluid/framework/operator.h"
#include "paddle/fluid/operators/eigen/eigen_function.h"
#include "paddle/fluid/operators/math/blas.h"
#include "paddle/fluid/operators/math/math_function.h"
#include "paddle/pten/kernels/funcs/math_function.h"

namespace ops = paddle::operators;
namespace plat = paddle::platform;
2 changes: 1 addition & 1 deletion paddle/fluid/operators/affine_grid_op.cu
@@ -170,7 +170,7 @@ class AffineGridGradOpCUDAKernel : public framework::OpKernel<T> {
w = size_attr[3];
}
T* theta_grad_data = theta_grad->mutable_data<T>({n, 2, 3}, ctx.GetPlace());
math::SetConstant<paddle::platform::CUDADeviceContext, T>()(
pten::funcs::SetConstant<paddle::platform::CUDADeviceContext, T>()(
ctx.cuda_device_context(), theta_grad, static_cast<T>(0));

T h_step;