From fc6ab70e8e9361b3505e21dbe04a71aee68a1fc2 Mon Sep 17 00:00:00 2001
From: tianhaodongbd <137985359+tianhaodongbd@users.noreply.github.com>
Date: Tue, 9 Jan 2024 15:08:18 +0800
Subject: [PATCH] [CE] Add MP2-SP2-PP4-VPP2-SD2-stage1-mbs2-acc8 CE (#7774)

---
 ...rain_bs32_bf16_MP2-SD4-stage1-mbs4-acc2.sh |  9 ++++-
 ...6_MP2-SP2-PP4-VPP2-SD2-stage1-mbs2-acc8.sh | 40 +++++++++++++++++++
 ...6_MP2-SP2-PP4-VPP2-SD2-stage1-mbs2-acc8.sh | 40 +++++++++++++++++++
 .../ce_gpt/benchmark_common/run_benchmark.sh  | 11 +++--
 4 files changed, 95 insertions(+), 5 deletions(-)
 create mode 100644 tests/test_tipc/dygraph/hybrid_parallelism/ce_gpt/N2C16/CE_gpt-345m_seqlen1024_pretrain_bs32_bf16_MP2-SP2-PP4-VPP2-SD2-stage1-mbs2-acc8.sh
 create mode 100644 tests/test_tipc/dygraph/hybrid_parallelism/ce_gpt/N2C16/gpt-345m_seqlen1024_pretrain_bs32_bf16_MP2-SP2-PP4-VPP2-SD2-stage1-mbs2-acc8.sh

diff --git a/tests/test_tipc/dygraph/hybrid_parallelism/ce_gpt/N1C8/CE_gpt-345m_seqlen1024_pretrain_bs32_bf16_MP2-SD4-stage1-mbs4-acc2.sh b/tests/test_tipc/dygraph/hybrid_parallelism/ce_gpt/N1C8/CE_gpt-345m_seqlen1024_pretrain_bs32_bf16_MP2-SD4-stage1-mbs4-acc2.sh
index fe257f5970d5..1238bb41fd04 100644
--- a/tests/test_tipc/dygraph/hybrid_parallelism/ce_gpt/N1C8/CE_gpt-345m_seqlen1024_pretrain_bs32_bf16_MP2-SD4-stage1-mbs4-acc2.sh
+++ b/tests/test_tipc/dygraph/hybrid_parallelism/ce_gpt/N1C8/CE_gpt-345m_seqlen1024_pretrain_bs32_bf16_MP2-SD4-stage1-mbs4-acc2.sh
@@ -18,6 +18,11 @@ mp_degree=2
 pp_degree=1
 sharding_degree=4
 sharding=stage1
+virtual_pp_degree=1
+use_recompute=True
+eval_freq=25
+use_pipeline_parallel=False
+sequence_parallel=False
 bs_item=32
 fp_item=bf16
 run_mode=MP2-SD4-stage1-mbs4-acc2
@@ -26,8 +31,10 @@ max_iter=50000
 
 model=gpt
 micro_bs=4
+acc=2
+seed=3589
 
 bash ./test_tipc/dygraph/hybrid_parallelism/ce_gpt/benchmark_common/prepare.sh
 # run
 bash ./test_tipc/dygraph/hybrid_parallelism/ce_gpt/benchmark_common/run_benchmark.sh ${model_item} ${fp_item} ${dp_degree} ${mp_degree} ${pp_degree} ${micro_bs} ${bs_item} ${run_mode} ${device_num} \
-${max_iter} ${sharding} ${sharding_degree} 2>&1;
+${max_iter} ${sharding} ${sharding_degree} ${virtual_pp_degree} ${use_recompute} ${eval_freq} ${use_pipeline_parallel} ${sequence_parallel} ${acc} ${seed} 2>&1;
diff --git a/tests/test_tipc/dygraph/hybrid_parallelism/ce_gpt/N2C16/CE_gpt-345m_seqlen1024_pretrain_bs32_bf16_MP2-SP2-PP4-VPP2-SD2-stage1-mbs2-acc8.sh b/tests/test_tipc/dygraph/hybrid_parallelism/ce_gpt/N2C16/CE_gpt-345m_seqlen1024_pretrain_bs32_bf16_MP2-SP2-PP4-VPP2-SD2-stage1-mbs2-acc8.sh
new file mode 100644
index 000000000000..e476037f9ada
--- /dev/null
+++ b/tests/test_tipc/dygraph/hybrid_parallelism/ce_gpt/N2C16/CE_gpt-345m_seqlen1024_pretrain_bs32_bf16_MP2-SP2-PP4-VPP2-SD2-stage1-mbs2-acc8.sh
@@ -0,0 +1,40 @@
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+model_item=CE_gpt-345m_seqlen1024_pretrain
+dp_degree=1
+mp_degree=2
+pp_degree=4
+bs_item=32
+fp_item=bf16
+run_mode=MP2-SP2-PP4-VPP2-SD2-stage1-mbs2-acc8
+device_num=N2C16
+max_iter=50000
+sharding=stage1
+sharding_degree=2
+virtual_pp_degree=2
+use_recompute=True
+eval_freq=25
+use_pipeline_parallel=True
+sequence_parallel=True
+
+model=gpt
+micro_bs=2
+acc=8
+seed=3589
+
+bash ./test_tipc/dygraph/hybrid_parallelism/ce_gpt/benchmark_common/prepare.sh
+# run
+bash ./test_tipc/dygraph/hybrid_parallelism/ce_gpt/benchmark_common/run_benchmark.sh ${model_item} ${fp_item} ${dp_degree} ${mp_degree} ${pp_degree} ${micro_bs} ${bs_item} ${run_mode} ${device_num} \
+${max_iter} ${sharding} ${sharding_degree} ${virtual_pp_degree} ${use_recompute} ${eval_freq} ${use_pipeline_parallel} ${sequence_parallel} ${acc} ${seed} 2>&1;
\ No newline at end of file
diff --git a/tests/test_tipc/dygraph/hybrid_parallelism/ce_gpt/N2C16/gpt-345m_seqlen1024_pretrain_bs32_bf16_MP2-SP2-PP4-VPP2-SD2-stage1-mbs2-acc8.sh b/tests/test_tipc/dygraph/hybrid_parallelism/ce_gpt/N2C16/gpt-345m_seqlen1024_pretrain_bs32_bf16_MP2-SP2-PP4-VPP2-SD2-stage1-mbs2-acc8.sh
new file mode 100644
index 000000000000..03a50a587e9e
--- /dev/null
+++ b/tests/test_tipc/dygraph/hybrid_parallelism/ce_gpt/N2C16/gpt-345m_seqlen1024_pretrain_bs32_bf16_MP2-SP2-PP4-VPP2-SD2-stage1-mbs2-acc8.sh
@@ -0,0 +1,40 @@
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+model_item=gpt-345m_seqlen1024_pretrain
+dp_degree=1
+mp_degree=2
+pp_degree=4
+bs_item=32
+fp_item=bf16
+run_mode=MP2-SP2-PP4-VPP2-SD2-stage1-mbs2-acc8
+device_num=N2C16
+max_iter=100
+sharding=stage1
+sharding_degree=2
+
+virtual_pp_degree=2
+use_recompute=True
+eval_freq=25
+use_pipeline_parallel=True
+sequence_parallel=True
+
+model=gpt
+micro_bs=2
+acc=8
+
+bash ./test_tipc/dygraph/hybrid_parallelism/ce_gpt/benchmark_common/prepare.sh
+# run
+bash ./test_tipc/dygraph/hybrid_parallelism/ce_gpt/benchmark_common/run_benchmark.sh ${model_item} ${fp_item} ${dp_degree} ${mp_degree} ${pp_degree} ${micro_bs} ${bs_item} ${run_mode} ${device_num} \
+${max_iter} ${sharding} ${sharding_degree} ${virtual_pp_degree} ${use_recompute} ${eval_freq} ${use_pipeline_parallel} ${sequence_parallel} ${acc} 2>&1;
\ No newline at end of file
diff --git a/tests/test_tipc/dygraph/hybrid_parallelism/ce_gpt/benchmark_common/run_benchmark.sh b/tests/test_tipc/dygraph/hybrid_parallelism/ce_gpt/benchmark_common/run_benchmark.sh
index 8ee5ec54d8ba..b32470ab8d1f 100644
--- a/tests/test_tipc/dygraph/hybrid_parallelism/ce_gpt/benchmark_common/run_benchmark.sh
+++ b/tests/test_tipc/dygraph/hybrid_parallelism/ce_gpt/benchmark_common/run_benchmark.sh
@@ -37,11 +37,13 @@ function _set_params(){
     sharding_degree=${12:-"1"}
     num_workers=0                         # (optional)
     base_batch_size=$global_batch_size
-    virtual_pp_degree=${13:-"2"}          # (optional) virtual pipeline parallel degree
+    vpp_degree=${13:-"1"}                 # (optional) virtual pipeline parallel degree
     use_recompute=${14:-"True"}           # (optional) whether to enable recompute
     eval_freq=${15:-"25"}                 # (optional) model evaluation interval
     use_pipeline_parallel=${16:-"False"}  # (optional) whether to set pipeline_parallel_config
     sequence_parallel=${17:-"False"}      # (optional) whether to enable sequence_parallel
+    acc=${18:-"2"}                        # (optional) gradient accumulation steps
+    seed=${19:-"1234"}                    # (optional) random seed
     # The commands below are shared across runs; normally no changes are needed.
     model_name=${model_item}_bs${global_batch_size}_${fp_item}_${run_mode}  # (required) keep this format; it must align with competitor model names
     device=${CUDA_VISIBLE_DEVICES//,/ }
@@ -108,10 +110,11 @@ function _train(){
         --tensor_parallel_degree ${mp_degree} \
         --pipeline_parallel_degree ${pp_degree} \
         ${pp_config_disable_partial_send_recv} \
+        --virtual_pp_degree ${vpp_degree} \
         --sequence_parallel ${sequence_parallel} \
         --split 949,50,1 \
         --max_seq_length 1024 \
-        --seed 1234 \
+        --seed ${seed} \
         --fuse_attention_qkv True \
         --use_flash_attention True \
         --bf16 ${bf16} \
@@ -125,7 +128,7 @@ function _train(){
         --dataloader_num_workers 1 \
         --eval_steps 1000 \
         --disable_tqdm True \
-        --gradient_accumulation_steps 2 \
+        --gradient_accumulation_steps ${acc} \
         --weight_decay 0.01\
         --max_steps ${max_iter}\
         --save_steps 5000\
@@ -150,7 +153,7 @@ function _train(){
             run_pretrain.py ${train_cmd}"
         workerlog_id=0
         ;;
-    DP8-mbs2-acc2|SD8-stage1-mbs2-acc2|SD8-stage2-mbs2-acc2|SD8-stage3-mbs2-acc2|MP2-SD4-stage1-mbs4-acc2|MP2-SP2-PP2-DP2-mbs8-acc2|MP8-mbs16-acc2|MP2-PP2-DP2-mbs8-acc2|MP2-PP2-SD2-Stage1-mbs8-acc2|MP2-SP2-PP2-SD2-Stage1-mbs8-acc2) echo "run run_mode: ${run_mode}"
+    DP8-mbs2-acc2|SD8-stage1-mbs2-acc2|SD8-stage2-mbs2-acc2|SD8-stage3-mbs2-acc2|MP2-SD4-stage1-mbs4-acc2|MP2-SP2-PP2-DP2-mbs8-acc2|MP8-mbs16-acc2|MP2-PP2-DP2-mbs8-acc2|MP2-PP2-SD2-Stage1-mbs8-acc2|MP2-SP2-PP2-SD2-Stage1-mbs8-acc2|MP2-SP2-PP4-VPP2-SD2-stage1-mbs2-acc8) echo "run run_mode: ${run_mode}"
         train_cmd="python -m paddle.distributed.launch --log_dir=./mylog --devices=0,1,2,3,4,5,6,7 ${PADDLE_RANK_OPTION}\
             run_pretrain.py ${train_cmd}"
         workerlog_id=0
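
A note on how the pieces above fit together: the launcher scripts and
run_benchmark.sh communicate purely through positional parameters, so the
argument order in the `bash ... run_benchmark.sh ...` call must match the
`${13:-...}` through `${19:-...}` reads in `_set_params()` exactly, and a
caller that omits trailing values (as the non-CE N2C16 script omits ${seed})
silently falls back to the defaults. The configs also keep the global batch
size consistent: 32 = micro_bs 2 x acc 8 x sharding_degree 2 in the N2C16
scripts, and 32 = 4 x 2 x 4 in the updated N1C8 script. Below is a minimal
bash sketch of that positional contract; `demo_set_params` is a hypothetical
stand-in for `_set_params()`, reading the same values the patch appends at
positions 13-19 with the same defaults.

    #!/usr/bin/env bash
    # Hypothetical stand-in for _set_params(): reads the trailing parameters
    # added by this patch, using the same defaults as run_benchmark.sh.
    demo_set_params() {
        vpp_degree=${1:-"1"}                 # virtual pipeline parallel degree
        use_recompute=${2:-"True"}           # whether to enable recompute
        eval_freq=${3:-"25"}                 # model evaluation interval
        use_pipeline_parallel=${4:-"False"}  # pipeline_parallel_config switch
        sequence_parallel=${5:-"False"}      # sequence_parallel switch
        acc=${6:-"2"}                        # gradient accumulation steps
        seed=${7:-"1234"}                    # random seed
        echo "vpp=${vpp_degree} recompute=${use_recompute} acc=${acc} seed=${seed}"
    }

    # The CE N2C16 script effectively passes these trailing values:
    demo_set_params 2 True 25 True True 8 3589   # vpp=2 recompute=True acc=8 seed=3589
    # The non-CE variant stops at ${acc}, so seed falls back to 1234:
    demo_set_params 2 True 25 True True 8        # vpp=2 recompute=True acc=8 seed=1234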