Retire Ascend and Cambricon related code: remove NPU-related code #53566

Merged (1 commit) on May 8, 2023
5 changes: 0 additions & 5 deletions paddle/fluid/framework/dlpack_tensor.cc
@@ -89,11 +89,6 @@ struct DLDeviceVisitor
         platform::errors::Unimplemented("platform::XPUPlace is not supported"));
   }
 
-  inline ::DLDevice operator()(const platform::NPUPlace &place) const {
-    PADDLE_THROW(
-        platform::errors::Unimplemented("platform::NPUPlace is not supported"));
-  }
-
   inline ::DLDevice operator()(const platform::NPUPinnedPlace &place) const {
     PADDLE_THROW(platform::errors::Unimplemented(
         "platform::NPUPinnedPlace is not supported"));
2 changes: 0 additions & 2 deletions paddle/fluid/framework/ir/auto_mixed_precision_pass.cc
@@ -55,8 +55,6 @@ static phi::Backend ConvertPlaceToBackend(const phi::Place& place) {
       return phi::Backend::GPU;
     case phi::AllocationType::XPU:
       return phi::Backend::XPU;
-    case phi::AllocationType::NPU:
-      return phi::Backend::NPU;
     default:
       PADDLE_THROW(platform::errors::InvalidArgument(
           "Cannot convert place(%d).", static_cast<int>(place.GetType())));
9 changes: 0 additions & 9 deletions paddle/fluid/framework/op_registry.h
@@ -374,9 +374,6 @@ struct OpKernelRegistrarFunctorEx<PlaceType,
 #define REGISTER_OP_XPU_KERNEL(op_type, ...) \
   REGISTER_OP_KERNEL(op_type, XPU, ::paddle::platform::XPUPlace, __VA_ARGS__)
 
-#define REGISTER_OP_NPU_KERNEL(op_type, ...) \
-  REGISTER_OP_KERNEL(op_type, NPU, ::paddle::platform::NPUPlace, __VA_ARGS__)
-
 #define REGISTER_OP_KERNEL_EX(op_type, library_type, place_class, \
                               customized_name,                    \
                               customized_type_value,              \
@@ -413,12 +410,6 @@ struct OpKernelRegistrarFunctorEx<PlaceType,
       ::paddle::framework::OpKernelType::kDefaultCustomizedTypeValue, \
       __VA_ARGS__)
 
-#define REGISTER_OP_NPU_KERNEL_FUNCTOR(op_type, ...)                  \
-  REGISTER_OP_KERNEL_EX(                                              \
-      op_type, NPU, ::paddle::platform::NPUPlace, DEFAULT_TYPE,       \
-      ::paddle::framework::OpKernelType::kDefaultCustomizedTypeValue, \
-      __VA_ARGS__)
-
 #define REGISTER_OP_IPU_KERNEL_FUNCTOR(op_type, ...)                  \
   REGISTER_OP_KERNEL_EX(                                              \
       op_type, IPU, ::paddle::platform::IPUPlace, DEFAULT_TYPE,       \
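For context, kernel registration for the remaining device types still goes through the macro family shown above. Below is a minimal sketch of such a registration, assuming a Paddle fluid source tree; the op name my_op and the kernel class MyOpKernel are hypothetical and not part of this diff.

// Sketch only: illustrates the surviving registration pattern after the NPU
// macros are removed. my_op and MyOpKernel are placeholder names.
#include "paddle/fluid/framework/op_registry.h"

namespace paddle {
namespace operators {

template <typename T>
class MyOpKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    // Kernel body omitted; only the registration shape matters here.
  }
};

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;

// The removed REGISTER_OP_NPU_KERNEL followed exactly this shape with
// ::paddle::platform::NPUPlace; the XPU registration remains available:
REGISTER_OP_XPU_KERNEL(my_op, ops::MyOpKernel<float>, ops::MyOpKernel<double>);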
2 changes: 0 additions & 2 deletions paddle/fluid/framework/parallel_executor.cc
@@ -1327,8 +1327,6 @@ void ParallelExecutor::InitExecutorPrivateMemberInfo(
     device_name = "CPU";
   } else if (member_->use_device_ == p::kCUDA) {
     device_name = "CUDA";
-  } else if (member_->use_device_ == p::kNPU) {
-    device_name = "NPU";
   } else if (member_->use_device_ == p::kXPU) {
     device_name = "XPU";
   } else {
2 changes: 0 additions & 2 deletions paddle/fluid/inference/api/analysis_predictor.cc
@@ -138,8 +138,6 @@ phi::Backend ConvertBackend(paddle_infer::PlaceType backend) {
     case paddle_infer::PlaceType::kGPU:
       // NOTE: phi also support phi::Backend::GPUDNN.
       return phi::Backend::GPU;
-    case paddle_infer::PlaceType::kNPU:
-      return phi::Backend::NPU;
     case paddle_infer::PlaceType::kXPU:
       return phi::Backend::XPU;
     case paddle_infer::PlaceType::kCPU:
2 changes: 0 additions & 2 deletions paddle/fluid/inference/api/api_impl.cc
@@ -82,8 +82,6 @@ bool NativePaddlePredictor::Init(
     place_ = paddle::platform::CUDAPlace(config_.device);
   } else if (config_.use_xpu) {
     place_ = paddle::platform::XPUPlace(config_.device);
-  } else if (config_.use_npu) {
-    place_ = paddle::platform::NPUPlace(config_.device);
   } else {
     place_ = paddle::platform::CPUPlace();
   }
3 changes: 0 additions & 3 deletions paddle/fluid/inference/api/details/zero_copy_tensor.cc
@@ -124,9 +124,6 @@ T *Tensor::mutable_data(PlaceType place) {
     case static_cast<int>(PlaceType::kXPU): {
       return tensor->mutable_data<T>(paddle::platform::XPUPlace(device_));
     }
-    case static_cast<int>(PlaceType::kNPU): {
-      return tensor->mutable_data<T>(paddle::platform::NPUPlace(device_));
-    }
     case static_cast<int>(PlaceType::kCUSTOM): {
       return tensor->mutable_data<T>(
           paddle::platform::CustomPlace(device_type_, device_));
2 changes: 1 addition & 1 deletion paddle/fluid/inference/api/paddle_tensor.h
@@ -67,7 +67,7 @@ enum DataType {
   // TODO(Inference): support more data types if needed.
 };
 
-enum class PlaceType { kUNK = -1, kCPU, kGPU, kXPU, kNPU, kIPU, kCUSTOM };
+enum class PlaceType { kUNK = -1, kCPU, kGPU, kXPU, kIPU, kCUSTOM };
 
 enum class DataLayout { kUNK = -1, kAny, kNHWC, kNCHW };
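Because kNPU is dropped from the middle of PlaceType, the underlying integer values of kIPU and kCUSTOM shift, so downstream code is safest switching on the enumerators by name. A small illustrative helper follows; it is not part of this PR, the function name PlaceTypeName is hypothetical, and the include path assumes this repository layout.

// Illustrative only: maps the trimmed paddle_infer::PlaceType to a readable name.
#include "paddle/fluid/inference/api/paddle_tensor.h"

const char* PlaceTypeName(paddle_infer::PlaceType p) {
  switch (p) {
    case paddle_infer::PlaceType::kCPU:
      return "CPU";
    case paddle_infer::PlaceType::kGPU:
      return "GPU";
    case paddle_infer::PlaceType::kXPU:
      return "XPU";
    case paddle_infer::PlaceType::kIPU:
      return "IPU";
    case paddle_infer::PlaceType::kCUSTOM:
      return "CUSTOM";
    case paddle_infer::PlaceType::kUNK:
    default:
      return "UNK";
  }
}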
3 changes: 0 additions & 3 deletions paddle/fluid/operators/collective/c_sync_comm_stream_op.cc
@@ -53,6 +53,3 @@ namespace plat = paddle::platform;
 REGISTER_OP_WITHOUT_GRADIENT(c_sync_comm_stream,
                              ops::CSyncCommStreamOp,
                              ops::CSyncCommStreamOpMaker);
-
-REGISTER_OP_NPU_KERNEL(c_sync_comm_stream,
-                       ops::CSyncCommStreamKernel<float, plat::NPUPlace>);
6 changes: 1 addition & 5 deletions paddle/fluid/operators/fill_constant_op.cc
@@ -95,9 +95,6 @@ class FillConstantOp : public framework::OperatorWithKernel {
       case 3:
         kt.set_backend(phi::Backend::XPU);
         break;
-      case 4:
-        kt.set_backend(phi::Backend::NPU);
-        break;
       default:
         PADDLE_THROW(platform::errors::Unimplemented(
             "Could NOT determine the place of variable, place_type = %d .",
@@ -161,8 +158,7 @@ class FillConstantOpMaker : public framework::OpProtoAndCheckerMaker {
                  "0: CPUPlace. "
                  "1: CUDAPlace. "
                  "2: CUDAPinnedPlace. "
-                 "3: XPUPlace. "
-                 "4: NPUPlace. ")
+                 "3: XPUPlace. ")
         .SetDefault(-1);
     AddOutput("Out",
               "(Tensor) Tensor of specified shape will be filled "
15 changes: 7 additions & 8 deletions paddle/fluid/operators/memcpy_d2h_op.cc
@@ -86,16 +86,15 @@ class MemcpyD2HOpProtoMaker : public framework::OpProtoAndCheckerMaker {
     AddOutput("Out",
               "(phi::DenseTensor) The type of output "
               "is the same as input X.");
-    AddAttr<int>(
-        "dst_place_type",
-        "Determine the dst place of tensor copy. "
-        "By Now it ONLY support XPU/NPUPlace/CUDAPlace <-> CUDAPinnedPlace/CPU"
-        "Other place type is Unimplemented and will cause ERROR."
-        "0: dst is on CPUPlace. "
-        "1: dst is on CUDAPinnedPlace. ");
+    AddAttr<int>("dst_place_type",
+                 "Determine the dst place of tensor copy. "
+                 "By Now it ONLY support XPU/CUDAPlace <-> CUDAPinnedPlace/CPU"
+                 "Other place type is Unimplemented and will cause ERROR."
+                 "0: dst is on CPUPlace. "
+                 "1: dst is on CUDAPinnedPlace. ");
     AddComment(R"DOC(
     MemcpyD2H Operator.
-    By now, it ONLY supports the memcopy between NPUPlace/CUDAPlace <-> CUDAPinnedPlace/CPU.
+    By now, it ONLY supports the memcopy between CUDAPlace <-> CUDAPinnedPlace/CPU.
     You would have to update it if you want other more capacities.
     Out = X, when type in [phi::DenseTensor]
     raise error if the type is not listed above.
7 changes: 3 additions & 4 deletions paddle/fluid/operators/memcpy_h2d_op.cc
@@ -91,13 +91,12 @@ class MemcpyH2DOpProtoMaker : public framework::OpProtoAndCheckerMaker {
                  "Determine the dst place of tensor copy. "
                  "By Now it support:"
                  "0. CUDAPinnedPlace/CPU <->CUDAPlace"
-                 "1. NPUPinnedPlace/CPU <-> NPUPlace"
-                 "2. CPU <->XPUPlace"
-                 "3. CPU <->IPUPlace"
+                 "1. CPU <->XPUPlace"
+                 "2. CPU <->IPUPlace"
                  "Other place type is Unimplemented and will cause ERROR.");
     AddComment(R"DOC(
     MemcpyD2H Operator.
-    By now, it ONLY supports the memcopy between CUDAPinnedPlace/CPU <-> NPUPlace/CUDAPlace.
+    By now, it ONLY supports the memcopy between CUDAPinnedPlace/CPU <-> CUDAPlace.
     You would have to update it if you want other more capacities.
     Out = X, when type in [phi::DenseTensor]
     raise error if the type is not listed above.
11 changes: 4 additions & 7 deletions paddle/fluid/operators/memcpy_op.cc
@@ -105,20 +105,17 @@ class MemcpyOpProtoMaker : public framework::OpProtoAndCheckerMaker {
               "is the same as input X.");
     AddAttr<int>("dst_place_type",
                  "Determine the dst place of tensor copy. "
-                 "By Now it ONLY support CUDAPlace <-> CUDAPinnedPlace or "
-                 "NPUPlace <-> CPUPlace. "
+                 "By Now it ONLY support CUDAPlace <-> CUDAPinnedPlace."
                  "Other place type is Unimplemented and will cause ERROR."
                  "0: dst is on CPUPlace. "
                  "1: dst is on CUDAPlace. "
                  "2: dst is on CUDAPinnedPlace. "
                  "3: dst is on XPUPlace. "
-                 "4: dst is on NPUPlace. "
-                 "5: dst is on NPUPinnerPlace. "
-                 "6: dst is on CustomDevicePlace");
+                 "4: dst is on NPUPinnerPlace. "
+                 "5: dst is on CustomDevicePlace");
     AddComment(R"DOC(
     Memcpy Operator.
-    By now, it ONLY supports the memcopy between CUDAPinnedPlace <-> CUDAPlace or
-    NPUPlace <-> CPUPlace, and used as an internal op by Recompute-Offload.
+    By now, it ONLY supports the memcopy between CUDAPinnedPlace <-> CUDAPlace, and used as an internal op by Recompute-Offload.
     You would have to update it if you want other more capacities.
 
     Out = X, when type in [phi::DenseTensor]
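For reference, a self-contained sketch of the dst_place_type codes as they read after this change; the helper below is purely illustrative and not an API added by the PR (it simply mirrors the attribute documentation above, where NPUPinnedPlace is spelled "NPUPinnerPlace").

#include <string>

// Illustrative helper: mirrors the renumbered dst_place_type codes documented
// in MemcpyOpProtoMaker after the NPUPlace entry is removed.
std::string MemcpyDstPlaceName(int dst_place_type) {
  switch (dst_place_type) {
    case 0:
      return "CPUPlace";
    case 1:
      return "CUDAPlace";
    case 2:
      return "CUDAPinnedPlace";
    case 3:
      return "XPUPlace";
    case 4:
      return "NPUPinnedPlace";
    case 5:
      return "CustomDevicePlace";
    default:
      return "Unimplemented";
  }
}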
1 change: 0 additions & 1 deletion paddle/fluid/platform/device_context.h
@@ -106,7 +106,6 @@ DeviceType Place2DeviceType(const platform::Place& place);
 constexpr DeviceType kCPU = DeviceType::CPU;
 constexpr DeviceType kCUDA = DeviceType::CUDA;
 constexpr DeviceType kXPU = DeviceType::XPU;
-constexpr DeviceType kNPU = DeviceType::NPU;
 constexpr DeviceType kIPU = DeviceType::IPU;
 constexpr DeviceType kCUSTOM_DEVICE = DeviceType::CUSTOM_DEVICE;
1 change: 0 additions & 1 deletion paddle/fluid/platform/device_event.h
@@ -26,7 +26,6 @@
 using ::paddle::platform::kCPU;
 using ::paddle::platform::kCUDA;
 using ::paddle::platform::kCUSTOM_DEVICE;
-using ::paddle::platform::kNPU;
 using ::paddle::platform::kXPU;
 
 USE_EVENT(kCPU)
2 changes: 0 additions & 2 deletions paddle/fluid/platform/place.cc
@@ -102,8 +102,6 @@ Place PlaceHelper::CreatePlace(const std::string &dev_type, size_t dev_id) {
     return platform::CPUPlace();
   } else if (dev_type == "gpu") {
     return platform::CUDAPlace(dev_id);
-  } else if (dev_type == "npu") {
-    return platform::NPUPlace(dev_id);
   } else if (dev_type == "xpu") {
     return platform::XPUPlace(dev_id);
   } else {
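A brief usage sketch of the helper touched above, assuming a Paddle source tree (this is not new API from the PR): after this change the string "npu" is no longer mapped to an NPUPlace and falls through to the final else branch, which is not shown in this hunk.

// Sketch: constructing places from device-type strings after the NPU branch
// is removed. The header path and PlaceHelper namespace are assumptions here.
#include "paddle/fluid/platform/place.h"

void PlaceCreationDemo() {
  namespace plat = paddle::platform;
  auto cpu = plat::PlaceHelper::CreatePlace("cpu", /*dev_id=*/0);
  auto gpu = plat::PlaceHelper::CreatePlace("gpu", /*dev_id=*/0);
  auto xpu = plat::PlaceHelper::CreatePlace("xpu", /*dev_id=*/0);
  // "npu" now reaches the trailing else branch (custom device or error path).
  (void)cpu; (void)gpu; (void)xpu;
}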
6 changes: 0 additions & 6 deletions paddle/fluid/platform/place.h
@@ -28,7 +28,6 @@ using Place = phi::Place;
 using CPUPlace = phi::CPUPlace;
 using CUDAPlace = phi::GPUPlace;
 using CUDAPinnedPlace = phi::GPUPinnedPlace;
-using NPUPlace = phi::NPUPlace;
 using NPUPinnedPlace = phi::NPUPinnedPlace;
 using XPUPlace = phi::XPUPlace;
 using IPUPlace = phi::IPUPlace;
@@ -88,11 +87,6 @@ typename Visitor::result_type VisitPlace(const Place &place,
       return typename Visitor::result_type();
 #endif
     }
-    case phi::AllocationType::NPU: {
-      PADDLE_THROW(platform::errors::Unavailable(
-          "Paddle is not compiled with NPU. Cannot visit npu_pinned"));
-      return typename Visitor::result_type();
-    }
     case phi::AllocationType::NPUPINNED: {
       PADDLE_THROW(platform::errors::Unavailable(
           "Paddle is not compiled with NPU. Cannot visit npu_pinned"));
2 changes: 1 addition & 1 deletion paddle/fluid/pybind/eager.cc
@@ -159,7 +159,7 @@ void InitTensorWithNumpyValue(TensorObject* self,
   } else {
     PADDLE_THROW(platform::errors::InvalidArgument(
         "Place should be one of "
-        "CPUPlace/XPUPlace/CUDAPlace/CUDAPinnedPlace/NPUPlace/CustomPlace"));
+        "CPUPlace/XPUPlace/CUDAPlace/CUDAPinnedPlace/CustomPlace"));
   }
 }
 
2 changes: 1 addition & 1 deletion paddle/fluid/pybind/eager_math_op_patch.cc
@@ -108,7 +108,7 @@ void InitTensorWithNumpyValue(const py::object& array,
   } else {
     PADDLE_THROW(platform::errors::InvalidArgument(
         "Place should be one of "
-        "CPUPlace/XPUPlace/CUDAPlace/CUDAPinnedPlace/NPUPlace/CustomPlace"));
+        "CPUPlace/XPUPlace/CUDAPlace/CUDAPinnedPlace/CustomPlace"));
   }
 }
 
6 changes: 1 addition & 5 deletions paddle/fluid/pybind/eager_utils.cc
@@ -52,7 +52,6 @@ extern PyTypeObject* g_place_pytype;
 extern PyTypeObject* g_cudaplace_pytype;
 extern PyTypeObject* g_cpuplace_pytype;
 extern PyTypeObject* g_xpuplace_pytype;
-extern PyTypeObject* g_npuplace_pytype;
 extern PyTypeObject* g_cudapinnedplace_pytype;
 extern PyTypeObject* g_customplace_pytype;
 extern PyTypeObject* g_framework_tensor_pytype;
@@ -529,9 +528,6 @@ platform::Place CastPyArg2Place(PyObject* obj, ssize_t arg_pos) {
   } else if (PyObject_IsInstance(
                  obj, reinterpret_cast<PyObject*>(g_xpuplace_pytype))) {
     place = ::pybind11::handle(obj).cast<platform::XPUPlace>();
-  } else if (PyObject_IsInstance(
-                 obj, reinterpret_cast<PyObject*>(g_npuplace_pytype))) {
-    place = ::pybind11::handle(obj).cast<platform::NPUPlace>();
   } else if (PyObject_IsInstance(
                  obj, reinterpret_cast<PyObject*>(g_cudapinnedplace_pytype))) {
     place = ::pybind11::handle(obj).cast<platform::CUDAPinnedPlace>();
@@ -542,7 +538,7 @@ platform::Place CastPyArg2Place(PyObject* obj, ssize_t arg_pos) {
     PADDLE_THROW(platform::errors::InvalidArgument(
         "argument (position %d) must be "
         "one "
-        "of(Place,CUDAPlace,CPUPlace,XPUPlace,NPUPlace,CUDAPinnedPlace,"
+        "of(Place,CUDAPlace,CPUPlace,XPUPlace,CUDAPinnedPlace,"
         "CustomPlace), "
         "but got %s",
         arg_pos + 1,