From b36b6a01a6dd65ea45f4e8db0177a61c80ae410e Mon Sep 17 00:00:00 2001
From: zhangbo9674 <82555433+zhangbo9674@users.noreply.github.com>
Date: Fri, 17 May 2024 20:56:30 +0800
Subject: [PATCH] Add llama2-70b for test_tipc (#8455)

---
 ...etrain_dy2st_bs32_bf16_DP1_MP4_PP8_VPP5.sh | 26 ++++++++
 .../pretrain-llama2_70b.json                  | 62 +++++++++++++++++++
 2 files changed, 88 insertions(+)
 create mode 100644 tests/test_tipc/static/auto_parallel/llama2/N4C32/meta-llama-Llama-2-70b_pretrain_dy2st_bs32_bf16_DP1_MP4_PP8_VPP5.sh
 create mode 100644 tests/test_tipc/static/auto_parallel/llama2/pretrain_config_llama2_70b/pretrain-llama2_70b.json

diff --git a/tests/test_tipc/static/auto_parallel/llama2/N4C32/meta-llama-Llama-2-70b_pretrain_dy2st_bs32_bf16_DP1_MP4_PP8_VPP5.sh b/tests/test_tipc/static/auto_parallel/llama2/N4C32/meta-llama-Llama-2-70b_pretrain_dy2st_bs32_bf16_DP1_MP4_PP8_VPP5.sh
new file mode 100644
index 000000000000..6bdb2beb6cf9
--- /dev/null
+++ b/tests/test_tipc/static/auto_parallel/llama2/N4C32/meta-llama-Llama-2-70b_pretrain_dy2st_bs32_bf16_DP1_MP4_PP8_VPP5.sh
@@ -0,0 +1,26 @@
+# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+param="model_item=meta-llama-Llama-2-70b_pretrain_dy2st "
+param+="run_mode=DP1_MP4_PP8_VPP5 "
+param+="device_num=N4C32 "
+param+="global_batch_size=32 "
+param+="nnodes=4 "
+param+="model_type=llama2_70b "
+
+cd ./tests
+bash ./test_tipc/static/auto_parallel/llama2/benchmark_common/prepare.sh
+
+bash -c "${param} bash ./test_tipc/static/auto_parallel/llama2/benchmark_common/run_benchmark.sh"
+
diff --git a/tests/test_tipc/static/auto_parallel/llama2/pretrain_config_llama2_70b/pretrain-llama2_70b.json b/tests/test_tipc/static/auto_parallel/llama2/pretrain_config_llama2_70b/pretrain-llama2_70b.json
new file mode 100644
index 000000000000..d1fe56c5e9be
--- /dev/null
+++ b/tests/test_tipc/static/auto_parallel/llama2/pretrain_config_llama2_70b/pretrain-llama2_70b.json
@@ -0,0 +1,62 @@
+{
+    "model_name_or_path": "meta-llama/Llama-2-70b",
+    "tokenizer_name_or_path": "meta-llama/Llama-2-70b",
+    "input_dir": "./data",
+    "output_dir": "./checkpoints/llama2_pretrain_ckpts",
+    "weight_decay": 0.01,
+    "warmup_ratio": 0.01,
+    "max_grad_norm": 1.0,
+    "learning_rate": 3e-05,
+    "min_learning_rate": 3e-06,
+    "warmup_steps": 30,
+    "logging_steps": 1,
+    "max_steps": 50,
+    "save_steps": 5000,
+    "eval_steps": 1000,
+    "continue_training": 0,
+    "do_train": true,
+    "do_eval": false,
+    "do_predict": false,
+    "disable_tqdm": true,
+    "skip_profile_timer": true,
+    "save_total_limit": 2,
+    "device": "gpu",
+    "dataloader_num_workers": 1,
+    "distributed_dataloader": 0,
+    "enable_auto_parallel": true,
+    "per_device_train_batch_size": 1,
+    "gradient_accumulation_steps": 32,
+    "per_device_eval_batch_size": 32,
+    "recompute": false,
+    "recompute_use_reentrant": true,
+    "recompute_granularity": "full",
+    "pp_recompute_interval": 0,
+    "bf16": true,
+    "fp16_opt_level": "O2",
+    "amp_master_grad": true,
+    "amp_custom_black_list": ["reduce_sum", "c_softmax_with_cross_entropy"],
+    "amp_custom_white_list": ["lookup_table", "lookup_table_v2"],
+    "fuse_attention_ffn": true,
+    "fuse_attention_qkv": true,
+    "use_fused_rope": true,
+    "fused_linear_param_grad_add": true,
+    "fuse_sequence_parallel_allreduce": false,
+    "use_flash_attention": true,
+    "use_fused_rms_norm": true,
+    "sep_parallel_degree": 1,
+    "sequence_parallel": true,
+    "pipeline_parallel_degree": 8,
+    "sharding_parallel_degree": 1,
+    "sharding": "stage1",
+    "tensor_parallel_degree": 4,
+    "virtual_pp_degree": 5,
+    "pipeline_schedule_mode": "VPP",
+    "data_parallel_config": "enable_allreduce_avg_in_gradinent_scale gradient_sync_after_accumulate",
+    "sharding_parallel_config": "split_param enable_stage1_overlap",
+    "tensor_parallel_config": "enable_mp_async_allreduce",
+    "max_seq_length": 4096,
+    "to_static": true,
+    "eliminate_transpose": 1,
+    "fuse_allreduce_split_to_reducescatter": 1,
+    "sequence_parallel_config": "enable_allreduce_avg_in_gradinent_scale"
+}