Skip to content

Commit

Permalink
Remove some [-Wunused-parameter] warnings (#53683)
Browse files Browse the repository at this point in the history
* test,test=develop

* test,test=develop

* test,test=develop

* test,test=develop

* test,test=develop

* test,test=develop
  • Loading branch information
Galaxy1458 authored May 11, 2023
1 parent 2f56b6d commit dbb6269
Show file tree
Hide file tree
Showing 19 changed files with 30 additions and 28 deletions.
2 changes: 1 addition & 1 deletion paddle/fluid/operators/collective/global_gather_op.h
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,7 @@ namespace operators {
template <typename T, typename DeviceContext>
class GlobalGatherOpCPUKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
void Compute(const framework::ExecutionContext& ctx UNUSED) const override {
PADDLE_THROW(platform::errors::Unavailable(
"Do not support global gather op for cpu kernel now."));
}
Expand Down
2 changes: 1 addition & 1 deletion paddle/fluid/operators/collective/global_scatter_op.h
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,7 @@ namespace operators {
template <typename T, typename DeviceContext>
class GlobalScatterOpCPUKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
void Compute(const framework::ExecutionContext& ctx UNUSED) const override {
PADDLE_THROW(platform::errors::Unavailable(
"Do not support global scatter op for cpu kernel now."));
}
Expand Down
2 changes: 1 addition & 1 deletion paddle/fluid/operators/collective/partial_allgather_op.h
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,7 @@ namespace operators {
template <typename T, typename DeviceContext>
class PartialAllGatherOpCPUKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
void Compute(const framework::ExecutionContext& ctx UNUSED) const override {
PADDLE_THROW(platform::errors::Unavailable(
"Do not support partial_allgather for cpu kernel now."));
}
Expand Down
2 changes: 1 addition & 1 deletion paddle/fluid/operators/collective/partial_recv_op.h
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,7 @@ namespace operators {
template <typename T, typename DeviceContext>
class PartialRecvOpCPUKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
void Compute(const framework::ExecutionContext& ctx UNUSED) const override {
PADDLE_THROW(platform::errors::Unavailable(
"Do not support partial_recv for cpu kernel now."));
}
Expand Down
2 changes: 1 addition & 1 deletion paddle/fluid/operators/collective/partial_send_op.h
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,7 @@ namespace operators {
template <typename T, typename DeviceContext>
class PartialSendOpCPUKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
void Compute(const framework::ExecutionContext& ctx UNUSED) const override {
PADDLE_THROW(platform::errors::Unavailable(
"Do not support partial_send for cpu kernel now."));
}
Expand Down
2 changes: 1 addition & 1 deletion paddle/fluid/operators/collective/recv_v2_op.h
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,7 @@ namespace operators {
template <typename T, typename DeviceContext>
class RecvOpV2CPUKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
void Compute(const framework::ExecutionContext& ctx UNUSED) const override {
PADDLE_THROW(platform::errors::Unavailable(
"Do not support recv for cpu kernel now."));
}
Expand Down
4 changes: 2 additions & 2 deletions paddle/phi/kernels/impl/eigvalsh_grad_kernel_impl.h
Original file line number Diff line number Diff line change
Expand Up @@ -27,8 +27,8 @@ template <typename T, typename Context>
void EigvalshGradKernel(const Context& dev_ctx,
const DenseTensor& out_v,
const DenseTensor& out_w_grad,
const std::string& uplo,
bool is_test,
const std::string& uplo UNUSED,
bool is_test UNUSED,
DenseTensor* x_grad) {
auto tV = phi::TransposeLast2Dim<T>(dev_ctx, phi::Conj<T>(dev_ctx, out_v));

Expand Down
2 changes: 1 addition & 1 deletion paddle/phi/kernels/impl/einsum_impl.h
Original file line number Diff line number Diff line change
Expand Up @@ -752,7 +752,7 @@ void EinsumKernel(const Context& dev_ctx,
const std::string& equation,
DenseTensor* out,
std::vector<DenseTensor*> cache,
std::vector<DenseTensor*> xshape) {
std::vector<DenseTensor*> xshape UNUSED) {
std::vector<char> tmp;
// for the sake of compatibility, we may load and run v2.3 EinsumOp. Output
// may have nullptr and the cache.size() is not equal to inputs.size(). refer
Expand Down
10 changes: 6 additions & 4 deletions paddle/phi/kernels/impl/elementwise_grad_kernel_impl.h
Original file line number Diff line number Diff line change
Expand Up @@ -119,7 +119,9 @@ void SubtractDoubleGradImpl(const Context& dev_ctx,

template <typename T>
struct DivGradDX {
HOSTDEVICE T operator()(T x, T y, T out, T dout) const { return dout / y; }
HOSTDEVICE T operator()(T x UNUSED, T y, T out UNUSED, T dout) const {
return dout / y;
}
};

template <typename T>
Expand All @@ -136,7 +138,7 @@ struct DivGradDX<phi::dtype::complex<T>> {

template <typename T>
struct DivGradDY {
HOSTDEVICE T operator()(T x, T y, T out, T dout) const {
HOSTDEVICE T operator()(T x UNUSED, T y, T out, T dout) const {
return -dout * out / y;
}
};
Expand Down Expand Up @@ -857,14 +859,14 @@ struct MinGradDy {

template <typename T>
struct HeavisideGradDx {
HOSTDEVICE T operator()(T x, T y, T out, T dout) const {
HOSTDEVICE T operator()(T x UNUSED, T y UNUSED, T out UNUSED, T dout) const {
return dout * static_cast<T>(0);
}
};

template <typename T>
struct HeavisideGradDy {
HOSTDEVICE T operator()(T x, T y, T out, T dout) const {
HOSTDEVICE T operator()(T x, T y UNUSED, T out UNUSED, T dout) const {
return dout * static_cast<T>(x == static_cast<T>(0));
}
};
Expand Down
2 changes: 1 addition & 1 deletion paddle/phi/kernels/impl/lamb_kernel_impl.h
Original file line number Diff line number Diff line change
Expand Up @@ -128,7 +128,7 @@ void ComputeImpl(const Context& dev_ctx,
float beta1_f,
float beta2_f,
float epsilon_f,
bool multi_precision,
bool multi_precision UNUSED,
DenseTensor* param_out,
DenseTensor* mom1_out,
DenseTensor* mom2_out,
Expand Down
2 changes: 1 addition & 1 deletion paddle/phi/kernels/impl/lu_kernel_impl.h
Original file line number Diff line number Diff line change
Expand Up @@ -474,7 +474,7 @@ void Unpack_Pivot(const Context& dev_ctx,
const DenseTensor& Pivot,
DenseTensor* P,
int h,
int w) {
int w UNUSED) {
auto dims = Pivot.dims();
auto Pdimvec = vectorize(dims);
auto prank = Pdimvec.size();
Expand Down
2 changes: 1 addition & 1 deletion paddle/phi/kernels/impl/unstack_kernel_impl.h
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,7 @@ template <typename T, typename Context>
void UnStackKernel(const Context &dev_ctx,
const DenseTensor &x,
int axis,
int num,
int num UNUSED,
std::vector<DenseTensor *> outs) {
auto *dy = &x;
auto dx = outs;
Expand Down
6 changes: 3 additions & 3 deletions paddle/phi/kernels/onednn/conv_handler.h
Original file line number Diff line number Diff line change
Expand Up @@ -240,10 +240,10 @@ class ConvOneDNNHandlerT
const std::string& padding_algorithm,
const std::vector<int>& dilations_in,
int groups,
const std::string& data_format,
const std::string& data_format UNUSED,
bool is_test,
phi::DenseTensor* filter_grad,
phi::DenseTensor* in_x_grad,
phi::DenseTensor* filter_grad UNUSED,
phi::DenseTensor* in_x_grad UNUSED,
const std::string& unique_name)
: funcs::OneDNNHandlerT<T,
dnnl::convolution_forward,
Expand Down
2 changes: 1 addition & 1 deletion paddle/phi/kernels/onednn/pool_kernel.cc
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,7 @@ void Pool2dKernel(const Context& dev_ctx,
const std::vector<int>& paddings,
bool ceil_mode,
bool exclusive,
const std::string& data_format,
const std::string& data_format UNUSED,
const std::string& pooling_type,
bool global_pooling,
bool adaptive,
Expand Down
2 changes: 1 addition & 1 deletion paddle/phi/kernels/onednn/reduce_kernel_impl.h
Original file line number Diff line number Diff line change
Expand Up @@ -118,7 +118,7 @@ void ReduceGradKernel(const Context& dev_ctx,
bool reduce_all,
DenseTensor* x_grad,
dnnl::algorithm binary_type,
dnnl::algorithm reduction_type,
dnnl::algorithm reduction_type UNUSED,
float scale_x,
float scale_y) {
reduce_all = recompute_reduce_all(x, dims, reduce_all);
Expand Down
2 changes: 1 addition & 1 deletion paddle/phi/kernels/onednn/slice_kernel.cc
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,7 @@ void SliceKernel(const Context& dev_ctx,
const std::vector<int64_t>& axes,
const IntArray& starts,
const IntArray& ends,
const std::vector<int64_t>& infer_flags,
const std::vector<int64_t>& infer_flags UNUSED,
const std::vector<int64_t>& decrease_axis,
DenseTensor* out) {
const auto& onednn_engine = dev_ctx.GetEngine();
Expand Down
2 changes: 1 addition & 1 deletion paddle/phi/kernels/selected_rows/impl/lamb_kernel_impl.h
Original file line number Diff line number Diff line change
Expand Up @@ -131,7 +131,7 @@ void ComputeRowImpl(const Context& dev_ctx,
float beta1_f,
float beta2_f,
float epsilon_f,
bool multi_precision,
bool multi_precision UNUSED,
DenseTensor* param_out,
DenseTensor* mom1_out,
DenseTensor* mom2_out,
Expand Down
8 changes: 4 additions & 4 deletions paddle/phi/kernels/sparse/cpu/conv_grad_kernel.cc
Original file line number Diff line number Diff line change
Expand Up @@ -38,10 +38,10 @@ void Conv3dCooGradCPUKernel(const CPUContext& dev_ctx,
const DenseTensor& rulebook,
const DenseTensor& counter,
const SparseCooTensor& out_grad,
const std::vector<int>& paddings,
const std::vector<int>& dilations,
const std::vector<int>& strides,
const int groups,
const std::vector<int>& paddings UNUSED,
const std::vector<int>& dilations UNUSED,
const std::vector<int>& strides UNUSED,
const int groups UNUSED,
const bool subm,
const std::string& key,
SparseCooTensor* x_grad,
Expand Down
2 changes: 1 addition & 1 deletion paddle/phi/kernels/sparse/cpu/conv_kernel.cc
Original file line number Diff line number Diff line change
Expand Up @@ -34,7 +34,7 @@ void Conv3dCooCPUKernel(const CPUContext& dev_ctx,
const std::vector<int>& paddings,
const std::vector<int>& dilations,
const std::vector<int>& strides,
const int groups,
const int groups UNUSED,
const bool subm,
const std::string& key,
SparseCooTensor* out,
Expand Down

0 comments on commit dbb6269

Please sign in to comment.