
remove some [-Wunused-parameter] warnings #53687

Merged · 10 commits · May 15, 2023
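Context for the change (an editor's note, not part of the diff): every hunk below annotates a parameter that a kernel's signature requires but its body never reads with Paddle's UNUSED macro, which silences the compiler's -Wunused-parameter warning without altering the function's signature. The following is a minimal, self-contained sketch of the pattern; the macro definition and the ArgsortGradStub function are illustrative assumptions, not Paddle's actual code.

#include <cstdio>

// A common way such a macro is defined (assumption, not Paddle's header):
// on GCC/Clang it expands to __attribute__((unused)), telling the compiler
// the parameter is intentionally ignored; elsewhere it expands to nothing.
#if defined(__GNUC__) || defined(__clang__)
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif

// Hypothetical kernel-style function: 'descending' belongs to the shared
// interface but is not needed to compute this result, so it is annotated
// rather than removed (removing it would change the signature).
void ArgsortGradStub(int axis, bool descending UNUSED) {
  std::printf("axis = %d\n", axis);
}

int main() {
  ArgsortGradStub(0, true);  // builds warning-free under -Wall -Wextra
  return 0;
}

Annotating instead of deleting keeps the registered kernel signatures stable, which is why the hunks below touch only parameter declarations and no call sites.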
2 changes: 1 addition & 1 deletion paddle/phi/core/utils/unroll_array_ops.h
@@ -32,7 +32,7 @@ struct UnrollFillConstant {
 template <size_t kStart, size_t kEnd>
 struct UnrollFillConstant<kStart, kEnd, true> {
   template <typename T>
-  HOSTDEVICE inline static void Run(T *data, T val) {}
+  HOSTDEVICE inline static void Run(T *data UNUSED, T val UNUSED) {}
 };

 template <size_t kStart, size_t kEnd, bool kStop>
2 changes: 1 addition & 1 deletion paddle/phi/kernels/cpu/amp_kernel.cc
@@ -31,7 +31,7 @@ namespace phi {
 template <typename T, bool IsFoundInfOnCPU>
 class UpdateLossScalingFunctor<phi::CPUContext, T, IsFoundInfOnCPU> {
  public:
-  void operator()(const phi::CPUContext& ctx,
+  void operator()(const phi::CPUContext& ctx UNUSED,
                   const bool* found_inf_data,
                   const T* pre_loss_scaling_data,
                   const int* good_in_data,
2 changes: 1 addition & 1 deletion paddle/phi/kernels/cpu/argsort_grad_kernel.cc
@@ -55,7 +55,7 @@ void ArgsortGradKernel(const Context& dev_ctx,
                        const DenseTensor& input,
                        const DenseTensor& out_grad,
                        int axis,
-                       bool descending,
+                       bool descending UNUSED,
                        DenseTensor* in_grad) {
   auto in_dims = indices.dims();
   auto rank = input.dims().size();
8 changes: 4 additions & 4 deletions paddle/phi/kernels/cpu/elementwise.h
@@ -122,10 +122,10 @@ struct SameDimsDivideFunctor<
     DevCtx,
     T,
     typename std::enable_if<!std::is_floating_point<T>::value>::type> {
-  void operator()(const DevCtx& dev_ctx,
-                  const DenseTensor& x,
-                  const DenseTensor& y,
-                  DenseTensor* z) {
+  void operator()(const DevCtx& dev_ctx UNUSED,
+                  const DenseTensor& x UNUSED,
+                  const DenseTensor& y UNUSED,
+                  DenseTensor* z UNUSED) {
     phi::errors::InvalidArgument(
         "If use SameDimsDivideFunctor, template args(T) must be floating "
         "point. ");
3 changes: 2 additions & 1 deletion paddle/phi/kernels/cpu/hsigmoid_loss_grad.h
@@ -31,7 +31,8 @@ void HSigmoidLossGradKernelImpl(const Context& ctx,
                                 const DenseTensor& label,
                                 const paddle::optional<DenseTensor>& path,
                                 const paddle::optional<DenseTensor>& code,
-                                const paddle::optional<DenseTensor>& bias,
+                                const paddle::optional<DenseTensor>& bias
+                                    UNUSED,
                                 const DenseTensor& pre_out,
                                 const DenseTensor& out_grad,
                                 int num_classes,
2 changes: 1 addition & 1 deletion paddle/phi/kernels/cpu/index_sample_grad_kernel.cc
@@ -69,7 +69,7 @@ void IndexSampleGradInner(const Context& context,

 template <typename T, typename Context>
 void IndexSampleGradKernel(const Context& ctx,
-                           const DenseTensor& x,
+                           const DenseTensor& x UNUSED,
                            const DenseTensor& index,
                            const DenseTensor& out_grad,
                            DenseTensor* x_grad) {
2 changes: 1 addition & 1 deletion paddle/phi/kernels/cpu/index_select_impl.h
@@ -26,7 +26,7 @@ namespace phi {

 template <typename Context, typename T, class Enable = void>
 struct IndexSelectAdd {
-  void operator()(const Context& ctx,
+  void operator()(const Context& ctx UNUSED,
                   int slice_size,
                   const T* src_pointer,
                   const T* p_pointer,
2 changes: 1 addition & 1 deletion paddle/phi/kernels/cpu/kthvalue_grad_kernel.cc
@@ -49,7 +49,7 @@ void KthvalueGradKernel(const Context& dev_ctx,
                         const DenseTensor& x,
                         const DenseTensor& indices,
                         const DenseTensor& d_out,
-                        int k,
+                        int k UNUSED,
                         int axis,
                         bool keepdim,
                         DenseTensor* d_x) {
2 changes: 1 addition & 1 deletion paddle/phi/kernels/cpu/masked_select_grad_kernel.cc
@@ -21,7 +21,7 @@ namespace phi {

 template <typename T, typename Context>
 void MaskedSelectGradKernel(const Context& dev_ctx,
-                            const DenseTensor& x,
+                            const DenseTensor& x UNUSED,
                             const DenseTensor& mask,
                             const DenseTensor& out_grad,
                             DenseTensor* x_grad) {
4 changes: 2 additions & 2 deletions paddle/phi/kernels/cpu/nanmedian_grad_kernel.cc
@@ -26,7 +26,7 @@ void CalcMedianGradKernel(const Context& dev_ctx,
                           const DenseTensor& x,
                           const DenseTensor& median_index,
                           const DenseTensor& out_grad,
-                          const IntArray& axes,
+                          const IntArray& axes UNUSED,
                           DenseTensor* x_grad,
                           T* x_grad_ptr) {
   phi::funcs::SetConstant<Context, T> set_zero;
@@ -83,7 +83,7 @@ void NanmedianGradKernel(const Context& dev_ctx,
                          const DenseTensor& median_index,
                          const DenseTensor& out_grad,
                          const IntArray& axes,
-                         bool keep_dim,
+                         bool keep_dim UNUSED,
                          DenseTensor* x_grad) {
   BaseMedianGradKernel<T, Context>(
       dev_ctx, input, median_index, out_grad, axes, x_grad);
2 changes: 1 addition & 1 deletion paddle/phi/kernels/cpu/nanmedian_kernel.cc
@@ -192,7 +192,7 @@ template <typename T, typename Context>
 void NanmedianKernel(const Context& dev_ctx,
                      const DenseTensor& x,
                      const IntArray& axes,
-                     bool keepdim,
+                     bool keepdim UNUSED,
                      DenseTensor* out,
                      DenseTensor* median_index) {
   BaseMedianKernel<T, Context>(dev_ctx, x, axes, out, median_index, true);
2 changes: 1 addition & 1 deletion paddle/phi/kernels/cpu/randperm_kernel.cc
@@ -21,7 +21,7 @@ namespace phi {
 template <typename T, typename Context>
 void RandpermKernel(const Context& dev_ctx,
                     int n,
-                    DataType dtype,
+                    DataType dtype UNUSED,
                     DenseTensor* out) {
   T* out_data = dev_ctx.template Alloc<T>(out);
   int seed = 0;
4 changes: 2 additions & 2 deletions paddle/phi/kernels/cpu/repeat_interleave_grad_kernel.cc
@@ -26,7 +26,7 @@ namespace phi {
 template <typename T, typename Context>
 void RepeatInterleaveWithTensorIndexGradKernel(
     const Context& ctx,
-    const DenseTensor& x,
+    const DenseTensor& x UNUSED,
     const DenseTensor& repeats_tensor,
     const DenseTensor& out_grad,
     int dim,
@@ -74,7 +74,7 @@ void RepeatInterleaveWithTensorIndexGradKernel(

 template <typename T, typename Context>
 void RepeatInterleaveGradKernel(const Context& ctx,
-                                const DenseTensor& x,
+                                const DenseTensor& x UNUSED,
                                 const DenseTensor& out_grad,
                                 int repeats,
                                 int dim,
12 changes: 6 additions & 6 deletions paddle/phi/kernels/cpu/rnn_grad_kernel.cc
@@ -164,19 +164,19 @@ template <typename T, template <typename> class EigenActivationBackwardFunctor>
 struct SimpleRNNGradCell : GradCell<T> {
   void operator()(const CPUContext& dev_ctx,
                   DenseTensor* gate_tensor,
-                  DenseTensor* state_tensor,
-                  DenseTensor* act_state_tensor,
+                  DenseTensor* state_tensor UNUSED,
+                  DenseTensor* act_state_tensor UNUSED,
                   DenseTensor* hidden_tensor,
                   const DenseTensor* weight_hh,
                   DenseTensor* pre_hidden,
-                  DenseTensor* pre_state,
+                  DenseTensor* pre_state UNUSED,
                   DenseTensor* grad_hidden,
-                  DenseTensor* grad_state,
+                  DenseTensor* grad_state UNUSED,
                   DenseTensor* grad_gate,
                   DenseTensor* grad_weight_hh,
                   DenseTensor* grad_pre_hidden,
-                  DenseTensor* grad_pre_state,
-                  DenseTensor* grad_bias_hh,
+                  DenseTensor* grad_pre_state UNUSED,
+                  DenseTensor* grad_bias_hh UNUSED,
                   const DenseTensor& mask_tensor,
                   bool has_sequence_length) const override {
     DenseTensor grad_pre_hidden_bak;
14 changes: 7 additions & 7 deletions paddle/phi/kernels/cpu/rnn_kernel.cc
@@ -57,13 +57,13 @@ struct SimpleRNNCell : Cell<T> {
                   DenseTensor* input,
                   const DenseTensor* weight_hh,
                   const DenseTensor* init_h,
-                  const DenseTensor* init_c,
-                  DenseTensor* last_h,
-                  DenseTensor* last_c,
-                  DenseTensor* last_c_act,
-                  DenseTensor* output,
-                  const DenseTensor* bias_hh,
-                  DenseTensor* weight_hh_gru) const override {
+                  const DenseTensor* init_c UNUSED,
+                  DenseTensor* last_h UNUSED,
+                  DenseTensor* last_c UNUSED,
+                  DenseTensor* last_c_act UNUSED,
+                  DenseTensor* output UNUSED,
+                  const DenseTensor* bias_hh UNUSED,
+                  DenseTensor* weight_hh_gru UNUSED) const override {
     auto blas = phi::funcs::GetBlas<CPUContext, T>(*dev_ctx);
     auto mat_dim_a =
         phi::funcs::CreateMatrixDescriptor(init_h->dims(), 0, false);
4 changes: 2 additions & 2 deletions paddle/phi/kernels/cpu/scatter_grad_kernel.cc
@@ -25,9 +25,9 @@ namespace phi {
 template <typename T, typename Context>
 void ScatterGradKernel(const Context &ctx,
                        const DenseTensor &index,
-                       const DenseTensor &updates,
+                       const DenseTensor &updates UNUSED,
                        const DenseTensor &out_grad,
-                       bool overwrite,
+                       bool overwrite UNUSED,
                        DenseTensor *x_grad,
                        DenseTensor *updates_grad) {
   const auto &index_type = index.dtype();
4 changes: 2 additions & 2 deletions paddle/phi/kernels/cpu/top_k_grad_kernel.cc
@@ -56,8 +56,8 @@ void TopkGradKernel(const Context& dev_ctx,
                     const DenseTensor& out_grad,
                     const Scalar& k_scalar,
                     int axis,
-                    bool largest,
-                    bool sorted,
+                    bool largest UNUSED,
+                    bool sorted UNUSED,
                     DenseTensor* x_grad) {
   const auto& in_dims = x.dims();
   const auto& out_dims = indices.dims();
2 changes: 1 addition & 1 deletion paddle/phi/kernels/cpu/trunc_grad_kernel.cc
@@ -21,7 +21,7 @@ namespace phi {

 template <typename T, typename Context>
 void TruncGradKernel(const Context& dev_ctx,
-                     const DenseTensor& out_grad,
+                     const DenseTensor& out_grad UNUSED,
                      DenseTensor* in_grad) {
   T* dx_data = dev_ctx.template Alloc<T>(in_grad);
4 changes: 2 additions & 2 deletions paddle/phi/kernels/cpu/where_grad_kernel.cc
@@ -21,8 +21,8 @@ namespace phi {
 template <typename T, typename Context>
 void WhereGradKernel(const Context& ctx,
                      const DenseTensor& condition,
-                     const DenseTensor& x,
-                     const DenseTensor& y,
+                     const DenseTensor& x UNUSED,
+                     const DenseTensor& y UNUSED,
                      const DenseTensor& out_grad,
                      DenseTensor* x_grad,
                      DenseTensor* y_grad) {
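A closing side note on the technique (an observation, not something this PR does): in standard C++17 the attribute [[maybe_unused]] achieves the same effect with no macro, and a parameter can also simply be left unnamed. Both stub functions below are illustrative assumptions, not Paddle code.

#include <cstdio>

// C++17 standard-attribute equivalent of the UNUSED macro.
void TopkGradStub(int axis, [[maybe_unused]] bool largest) {
  std::printf("axis = %d\n", axis);
}

// Alternatively, omit the parameter name: the signature is unchanged and
// -Wunused-parameter has nothing to flag.
void TruncGradStub(int /*k*/, double* in_grad) { *in_grad = 0.0; }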