From 9d9bd67f29ecdd3fd88f9c7aa3989da7abc6ba05 Mon Sep 17 00:00:00 2001
From: Guo Sheng
Date: Sat, 11 Dec 2021 00:12:23 +0800
Subject: [PATCH] Add use_faster to faster_gpt sample. (#1443)

---
 examples/language_model/gpt/faster_gpt/infer.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/examples/language_model/gpt/faster_gpt/infer.py b/examples/language_model/gpt/faster_gpt/infer.py
index ff0e37c5a6c4..54bffbde980b 100644
--- a/examples/language_model/gpt/faster_gpt/infer.py
+++ b/examples/language_model/gpt/faster_gpt/infer.py
@@ -118,7 +118,8 @@ def do_predict(args):
         bos_token_id=bos_id,
         eos_token_id=eos_id,
         decode_strategy="sampling",
-        use_fp16_decoding=args.use_fp16_decoding)
+        use_fp16_decoding=args.use_fp16_decoding,
+        use_faster=True)
     output_sequence = out_seq.numpy()
     paddle.fluid.core._cuda_synchronize(place)
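
For context, the patch only opts the sample into PaddleNLP's FasterGeneration path by passing `use_faster=True` alongside `use_fp16_decoding` in the `model.generate()` call. The sketch below shows what such a call looks like outside the sample; it is a minimal, hypothetical example, and the checkpoint name, prompt, and sampling parameters are assumptions rather than values taken from `infer.py`.

```python
# Minimal sketch (not part of the patch): a generate() call with use_faster=True,
# as added by this change. Checkpoint, prompt, and sampling settings are assumed.
import paddle
from paddlenlp.transformers import GPTLMHeadModel, GPTTokenizer

model_name = "gpt2-en"  # assumed checkpoint, not the one used in infer.py
tokenizer = GPTTokenizer.from_pretrained(model_name)
model = GPTLMHeadModel.from_pretrained(model_name)
model.eval()

# Encode a single prompt into a [batch, seq_len] int64 tensor.
ids = tokenizer("Question: where is the capital of France? Answer:")["input_ids"]
input_ids = paddle.to_tensor([ids], dtype="int64")

with paddle.no_grad():
    out_seq, _ = model.generate(
        input_ids=input_ids,
        max_length=32,
        decode_strategy="sampling",
        top_k=4,
        use_fp16_decoding=False,  # FP16 decoding requires a GPU build
        use_faster=True)          # opt in to the high-performance decoding kernels

print(tokenizer.convert_ids_to_string(out_seq.numpy()[0].tolist()))
```

Note that in this PaddleNLP release the flag is spelled `use_faster`; the accelerated decoding ops may be JIT-compiled on first use if no prebuilt library is available.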