Add llama2-70b for test_tipc (#8455)
zhangbo9674 authored May 17, 2024
Parent 5d4ce56 · commit b36b6a0
Showing 2 changed files with 88 additions and 0 deletions.
@@ -0,0 +1,26 @@
# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

param="model_item=meta-llama-Llama-2-70b_pretrain_dy2st "
param+="run_mode=DP1_MP4_PP8_VPP5 "
param+="device_num=N4C32 "
param+="global_batch_size=32 "
param+="nnodes=4 "
param+="model_type=llama2_70b "

cd ./tests
bash ./test_tipc/static/auto_parallel/llama2/benchmark_common/prepare.sh

bash -c "${param} bash ./test_tipc/static/auto_parallel/llama2/benchmark_common/run_benchmark.sh"

@@ -0,0 +1,62 @@
{
"model_name_or_path": "meta-llama/Llama-2-70b",
"tokenizer_name_or_path": "meta-llama/Llama-2-70b",
"input_dir": "./data",
"output_dir": "./checkpoints/llama2_pretrain_ckpts",
"weight_decay": 0.01,
"warmup_ratio": 0.01,
"max_grad_norm": 1.0,
"learning_rate": 3e-05,
"min_learning_rate": 3e-06,
"warmup_steps": 30,
"logging_steps": 1,
"max_steps": 50,
"save_steps": 5000,
"eval_steps": 1000,
"continue_training": 0,
"do_train": true,
"do_eval": false,
"do_predict": false,
"disable_tqdm": true,
"skip_profile_timer": true,
"save_total_limit": 2,
"device": "gpu",
"dataloader_num_workers": 1,
"distributed_dataloader": 0,
"enable_auto_parallel": true,
"per_device_train_batch_size": 1,
"gradient_accumulation_steps": 32,
"per_device_eval_batch_size": 32,
"recompute": false,
"recompute_use_reentrant": true,
"recompute_granularity": "full",
"pp_recompute_interval": 0,
"bf16": true,
"fp16_opt_level": "O2",
"amp_master_grad": true,
"amp_custom_black_list": ["reduce_sum", "c_softmax_with_cross_entropy"],
"amp_custom_white_list": ["lookup_table", "lookup_table_v2"],
"fuse_attention_ffn": true,
"fuse_attention_qkv": true,
"use_fused_rope": true,
"fused_linear_param_grad_add": true,
"fuse_sequence_parallel_allreduce": false,
"use_flash_attention": true,
"use_fused_rms_norm": true,
"sep_parallel_degree": 1,
"sequence_parallel": true,
"pipeline_parallel_degree": 8,
"sharding_parallel_degree": 1,
"sharding": "stage1",
"tensor_parallel_degree": 4,
"virtual_pp_degree": 5,
"pipeline_schedule_mode": "VPP",
"data_parallel_config": "enable_allreduce_avg_in_gradinent_scale gradient_sync_after_accumulate",
"sharding_parallel_config": "split_param enable_stage1_overlap",
"tensor_parallel_config": "enable_mp_async_allreduce",
"max_seq_length": 4096,
"to_static": true,
"eliminate_transpose": 1,
"fuse_allreduce_split_to_reducescatter": 1,
"sequence_parallel_config": "enable_allreduce_avg_in_gradinent_scale"
}
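
A quick consistency check on the layout encoded above (my arithmetic, not part of the commit): tensor_parallel_degree 4 × pipeline_parallel_degree 8 × data/sharding parallel degree 1 requires 32 ranks, which matches device_num=N4C32 (4 nodes × 8 GPUs) and run_mode=DP1_MP4_PP8_VPP5 in the launch script. The effective global batch size is per_device_train_batch_size × gradient_accumulation_steps × dp = 1 × 32 × 1 = 32, matching global_batch_size=32. Under the VPP schedule, virtual_pp_degree=5 splits Llama-2-70b's 80 decoder layers into 8 × 5 = 40 virtual stages of 2 layers each:

```bash
# Sanity check (illustrative) that the config's parallel degrees are consistent.
tp=4; pp=8; dp=1; sharding=1
echo "world size:   $((tp * pp * dp * sharding))"   # 32 -> N4C32 = 4 nodes x 8 GPUs
per_dev=1; accum=32
echo "global batch: $((per_dev * accum * dp))"      # 32, matches the launch script
layers=80; vpp=5                                    # Llama-2-70b has 80 decoder layers
echo "layers/stage: $((layers / (pp * vpp)))"       # 80 / (8 * 5) = 2
```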
