From c565b77e752b8be660580bdf76d25496ce236e96 Mon Sep 17 00:00:00 2001
From: tianhaodongbd <137985359+tianhaodongbd@users.noreply.github.com>
Date: Mon, 9 Dec 2024 14:54:22 +0800
Subject: [PATCH] [npu] fix where bug (#9573)

* [npu] fix where bug

* fix
---
 paddlenlp/transformers/llama/modeling.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/paddlenlp/transformers/llama/modeling.py b/paddlenlp/transformers/llama/modeling.py
index 099abbbff68c..8bf0d5938902 100755
--- a/paddlenlp/transformers/llama/modeling.py
+++ b/paddlenlp/transformers/llama/modeling.py
@@ -1593,7 +1593,6 @@ def _prepare_decoder_attention_mask(attention_mask, input_shape, past_key_values
         if get_env_device() in ["npu", "mlu", "intel_hpu"]:
             x = paddle.to_tensor(0.0, dtype="float32")
             y = paddle.to_tensor(paddle.finfo(dtype).min, dtype="float32")
-            expanded_attn_mask = expanded_attn_mask.astype("float32")
             expanded_attn_mask = paddle.where(expanded_attn_mask, x, y).astype(dtype)
         elif get_env_device() in ["xpu", "gcu"]:
             min_val = paddle.finfo(dtype).min if get_env_device() == "gcu" else -1e37  # mask value for xpu
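
Reviewer note (not part of the patch): paddle.where(condition, x, y) selects
x where condition is True and y elsewhere, and takes a boolean condition
tensor. The deleted .astype("float32") cast handed the condition to where()
as a float tensor, which the NPU-family kernels apparently mishandle; the fix
keeps the mask boolean until after the select. A minimal standalone sketch of
the corrected pattern, with an illustrative hand-built mask and dtype (names
and values here are hypothetical, not taken from the model):

import paddle

dtype = "float16"  # illustrative; in the model this is the compute dtype

# Hypothetical boolean attention mask: True = attend, False = masked out.
expanded_attn_mask = paddle.to_tensor([[True, True, False],
                                       [True, False, False]])

x = paddle.to_tensor(0.0, dtype="float32")                               # value for attended positions
y = paddle.to_tensor(paddle.finfo(paddle.float16).min, dtype="float32") # value for masked positions

# The boolean mask goes straight into where(); the cast to the target
# dtype happens only on the selected float values afterwards.
expanded_attn_mask = paddle.where(expanded_attn_mask, x, y).astype(dtype)
print(expanded_attn_mask)  # 0.0 where True, finfo(float16).min where False

The 0-d tensors x and y broadcast against the 2-D mask, so no explicit
expand is needed.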