forked from PaddlePaddle/PaddleSpeech
Commit
Merge pull request PaddlePaddle#52 from chengduoZH/use_multi_processes_run_rcnn
[multi process] Use multiple processes to run Mask R-CNN
Showing 3 changed files with 111 additions and 9 deletions.
@@ -0,0 +1,45 @@
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os

import paddle.fluid as fluid

def nccl2_prepare(trainer_id, startup_prog, main_prog):
    # Transpile the programs for NCCL2-based distributed training.
    # The trainer endpoints and the current endpoint are read from the
    # environment variables set by the process launcher.
    config = fluid.DistributeTranspilerConfig()
    config.mode = "nccl2"
    t = fluid.DistributeTranspiler(config=config)
    t.transpile(trainer_id,
                trainers=os.environ.get('PADDLE_TRAINER_ENDPOINTS'),
                current_endpoint=os.environ.get('PADDLE_CURRENT_ENDPOINT'),
                startup_program=startup_prog,
                program=main_prog)

def prepare_for_multi_process(exe, build_strategy, train_prog, startup_prog):
    # prepare for multi-process training
    trainer_id = int(os.environ.get('PADDLE_TRAINER_ID', 0))
    num_trainers = int(os.environ.get('PADDLE_TRAINERS_NUM', 1))
    print("PADDLE_TRAINERS_NUM", num_trainers)
    print("PADDLE_TRAINER_ID", trainer_id)
    build_strategy.num_trainers = num_trainers
    build_strategy.trainer_id = trainer_id
    # NOTE(zcd): use multiple processes to train the model,
    # and each process uses one GPU card.
    if num_trainers > 1:
        nccl2_prepare(trainer_id, startup_prog, train_prog)
        # the startup_prog is run twice here, but it doesn't matter.
        exe.run(startup_prog)
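
For context, here is a minimal sketch (not the train.py touched by this commit) of how a training script might call prepare_for_multi_process; build_model and the surrounding setup are hypothetical placeholders, shown only to illustrate the intended call order under fluid 1.x:

import os
import paddle.fluid as fluid

train_prog = fluid.Program()
startup_prog = fluid.Program()
with fluid.program_guard(train_prog, startup_prog):
    loss = build_model()  # hypothetical model-construction function

# paddle.distributed.launch typically exports FLAGS_selected_gpus per process,
# so each trainer process drives a single GPU card.
gpu_id = int(os.environ.get('FLAGS_selected_gpus', 0))
exe = fluid.Executor(fluid.CUDAPlace(gpu_id))
exe.run(startup_prog)

build_strategy = fluid.BuildStrategy()
# Fills in num_trainers / trainer_id and transpiles for NCCL2 when more
# than one trainer process was launched.
prepare_for_multi_process(exe, build_strategy, train_prog, startup_prog)

compiled_prog = fluid.CompiledProgram(train_prog).with_data_parallel(
    loss_name=loss.name, build_strategy=build_strategy)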
@@ -0,0 +1,24 @@
#!/bin/bash
set -xe

#export FLAGS_cudnn_deterministic=true
#export FLAGS_enable_parallel_graph=1
export FLAGS_eager_delete_tensor_gb=0.0
export FLAGS_fraction_of_gpu_memory_to_use=0.98
export FLAGS_memory_fraction_of_eager_deletion=1.0
export FLAGS_conv_workspace_size_limit=1500

base_batch_size=1

export CUDA_VISIBLE_DEVICES=0,1

# Count the GPUs listed in CUDA_VISIBLE_DEVICES.
device=${CUDA_VISIBLE_DEVICES//,/ }
arr=($device)
num_gpu_devices=${#arr[*]}

python -m paddle.distributed.launch --gpus ${num_gpu_devices} train.py \
    --model_save_dir=output/ \
    --pretrained_model=../imagenet_resnet50_fusebn/ \
    --data_dir=./dataset/coco \
    --im_per_batch=${base_batch_size} \
    --MASK_ON=True
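
Note: paddle.distributed.launch spawns one trainer process per selected GPU and exports, for each process, the PADDLE_TRAINER_ID, PADDLE_TRAINERS_NUM, PADDLE_CURRENT_ENDPOINT, and PADDLE_TRAINER_ENDPOINTS variables that the Python helper above reads; this describes the fluid-era launcher's usual behaviour rather than anything added in this diff.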