[Trainer] Fix distributed dataloader #8932

Merged
merged 4 commits on Aug 16, 2024
Changes from 2 commits
12 changes: 9 additions & 3 deletions paddlenlp/data/dist_dataloader.py
@@ -33,6 +33,11 @@ def __len__(self):
return 0


class IterableDummyDataset(paddle.io.IterableDataset):
Collaborator:
I was wondering whether the dataset side could construct the fake dataset itself instead.

Contributor Author:
I don't quite follow what you mean; the current approach seems fine to me.

def __iter__(self):
return None


class DistDataLoader(paddle.io.DataLoader):
"""
DistDataLoader is a wrapper of paddle.io.DataLoader.
@@ -57,10 +62,11 @@ def __init__(
worker_init_fn=None,
persistent_workers=False,
eval=False,
is_iterable_dataset=False,
):

if dataset is None:
dataset = DummyDataset()
dataset = DummyDataset() if not is_iterable_dataset else IterableDummyDataset()
logger.info("rank has no data, use Dummpy dataset")

super().__init__(dataset=dataset, batch_sampler=batch_sampler, collate_fn=collate_fn, num_workers=num_workers)
@@ -200,7 +206,7 @@ def __next__(self):
try:
data = next(self._dataloader_iter)
data = nested_copy_place(data, place=paddle.framework._current_expected_place())
except:
pass
except Exception as e:
logger.debug(e)
data = self._broadcast_data(data)
return data
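For readers skimming the diff above, here is a minimal usage sketch of the new flag. It assumes a multi-rank launch (e.g. via paddle.distributed.launch) and that DistDataLoader accepts the keyword arguments shown in this file's diff; the tiny dataset and collate function are illustrative only, not part of the PR.

```python
import paddle
from paddlenlp.data.dist_dataloader import DistDataLoader  # module changed in this PR


class TinyIterable(paddle.io.IterableDataset):
    """A small iterable dataset standing in for a real streaming corpus."""

    def __iter__(self):
        for i in range(4):
            yield {"x": paddle.to_tensor([i])}


def collate(batch):
    return {"x": paddle.stack([item["x"] for item in batch])}


# Rank that actually holds data: pass the real iterable dataset.
loader = DistDataLoader(
    TinyIterable(),
    batch_size=2,
    collate_fn=collate,
    num_workers=0,
    is_iterable_dataset=True,
)

# Rank that holds no data: dataset=None makes DistDataLoader build a placeholder,
# and is_iterable_dataset=True selects IterableDummyDataset rather than the
# map-style DummyDataset, so every rank constructs the same kind of loader
# before batches are broadcast in __next__.
empty_loader = DistDataLoader(
    None,
    batch_size=2,
    collate_fn=collate,
    num_workers=0,
    is_iterable_dataset=True,
)
```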
148 changes: 88 additions & 60 deletions paddlenlp/trainer/trainer.py
@@ -1398,12 +1398,15 @@ def get_train_dataloader(self):
raise ValueError("We don't need train_dataset when should_load_dataset is False.")

train_dataset = self.train_dataset
if self.args.distributed_dataloader:
is_iterable_dataset = self._is_iterable_dataset_dd(train_dataset)
Collaborator (suggested change):
- is_iterable_dataset = self._is_iterable_dataset_dd(train_dataset)
+ is_iterable_dataset = self._is_iterable_dataset_distributed(train_dataset)

else:
is_iterable_dataset = self._is_iterable_dataset(train_dataset)
if is_datasets_available() and train_dataset is not None and isinstance(train_dataset, datasets.Dataset):
train_dataset = self._remove_unused_columns(train_dataset, description="training")
_DataLoader = DistDataLoader if self.args.distributed_dataloader else DataLoader

if self._is_iterable_dataset(train_dataset):
if self.args.dataset_world_size > 1:
if is_iterable_dataset: # For iterable dataset
if self.args.dataset_world_size > 1 and train_dataset is not None:
train_dataset = IterableDatasetShard(
train_dataset,
batch_size=self.args.per_device_train_batch_size,
@@ -1412,25 +1415,34 @@ def get_train_dataloader(self):
process_index=self.args.dataset_rank,
)

if self.args.distributed_dataloader:
logger.info("Training using DistDataLoader.")
return DistDataLoader(
train_dataset,
batch_size=self.args.per_device_train_batch_size,
collate_fn=self.data_collator,
num_workers=self.args.dataloader_num_workers,
is_iterable_dataset=True,
Collaborator:
You could use an additional_args = {} and then pass it with **additional_args, keeping DistDataLoader and DataLoader merged into one call.

Contributor Author:
That doesn't really work, because Paddle's DataLoader does not accept a variable number of keyword arguments, short of modifying Paddle itself.

(A toy sketch of the suggested pattern appears after this diff hunk.)

)
else:
return DataLoader(
train_dataset,
batch_size=self.args.per_device_train_batch_size,
collate_fn=self.data_collator,
num_workers=self.args.dataloader_num_workers,
Collaborator:
Does this trigger the same problem with the plain DataLoader?

Contributor Author:
No; see the cause of the hang in the PR description.

)
else:
train_sampler = self._get_train_sampler()
_DataLoader = DistDataLoader if self.args.distributed_dataloader else DataLoader
if self.args.distributed_dataloader:
logger.info("Training using DistDataLoader.")
return _DataLoader(
train_dataset,
batch_size=self.args.per_device_train_batch_size,
batch_sampler=train_sampler,
collate_fn=self.data_collator,
num_workers=self.args.dataloader_num_workers,
)

train_sampler = self._get_train_sampler()

if self.args.distributed_dataloader:
logger.info("Training using DistDataLoader.")

return _DataLoader(
train_dataset,
batch_sampler=train_sampler,
collate_fn=self.data_collator,
num_workers=self.args.dataloader_num_workers,
)
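
The `**additional_args` idea from the thread above, as a self-contained toy (class and function names here are hypothetical stand-ins, not Paddle APIs). The pattern only helps if the extra key is never passed to the base class, which is the author's objection: paddle.io.DataLoader rejects keyword arguments it does not declare, so the PR keeps separate DistDataLoader / DataLoader branches instead.

```python
from typing import Any, Dict


class BaseLoader:
    """Stand-in for paddle.io.DataLoader."""

    def __init__(self, dataset, batch_size: int = 1):
        self.dataset = dataset
        self.batch_size = batch_size


class DistLoader(BaseLoader):
    """Stand-in for DistDataLoader, which understands one extra flag."""

    def __init__(self, dataset, batch_size: int = 1, is_iterable_dataset: bool = False):
        super().__init__(dataset, batch_size)
        self.is_iterable_dataset = is_iterable_dataset


def build_loader(dataset, use_dist: bool, is_iterable_dataset: bool):
    loader_cls = DistLoader if use_dist else BaseLoader
    additional_args: Dict[str, Any] = {}
    if use_dist:
        # Only populated when the subclass that understands the flag is chosen.
        additional_args["is_iterable_dataset"] = is_iterable_dataset
    return loader_cls(dataset, batch_size=8, **additional_args)


loader = build_loader(dataset=list(range(16)), use_dist=True, is_iterable_dataset=True)
```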

def _get_eval_sampler(self, eval_dataset: Dataset):
if eval_dataset is None or not has_length(eval_dataset):
return None
@@ -1476,12 +1488,15 @@ def get_eval_dataloader(self, eval_dataset: Optional[Dataset] = None) -> DataLoader:
raise ValueError("We don't need eval_dataset when should_load_dataset is False.")

eval_dataset = eval_dataset if eval_dataset is not None else self.eval_dataset

if self.args.distributed_dataloader:
is_iterable_dataset = self._is_iterable_dataset_dd(eval_dataset)
else:
is_iterable_dataset = self._is_iterable_dataset(eval_dataset)
if is_datasets_available() and eval_dataset is not None and isinstance(eval_dataset, datasets.Dataset):
eval_dataset = self._remove_unused_columns(eval_dataset, description="evaluation")

if self._is_iterable_dataset(eval_dataset):
if self.args.dataset_world_size > 1:
if is_iterable_dataset:
if self.args.dataset_world_size > 1 and eval_dataset is not None:
eval_dataset = IterableDatasetShard(
eval_dataset,
batch_size=self.args.per_device_eval_batch_size,
@@ -1497,6 +1512,7 @@ def get_eval_dataloader(self, eval_dataset: Optional[Dataset] = None) -> DataLoader:
collate_fn=self.data_collator,
num_workers=0,
eval=True,
is_iterable_dataset=True,
)
else:
return DataLoader(
@@ -1505,26 +1521,24 @@ def get_eval_dataloader(self, eval_dataset: Optional[Dataset] = None) -> DataLoader:
collate_fn=self.data_collator,
num_workers=0,
)

eval_sampler = self._get_eval_sampler(eval_dataset)

if self.args.distributed_dataloader:
logger.info("Eval using DistDataLoader.")

return DistDataLoader(
eval_dataset,
batch_sampler=eval_sampler,
collate_fn=self.data_collator,
num_workers=self.args.dataloader_num_workers,
eval=True,
)
else:
return DataLoader(
eval_dataset,
batch_sampler=eval_sampler,
collate_fn=self.data_collator,
num_workers=self.args.dataloader_num_workers,
)
eval_sampler = self._get_eval_sampler(eval_dataset)
if self.args.distributed_dataloader:
logger.info("Eval using DistDataLoader.")
return DistDataLoader(
eval_dataset,
batch_sampler=eval_sampler,
collate_fn=self.data_collator,
num_workers=self.args.dataloader_num_workers,
eval=True,
)
else:
return DataLoader(
eval_dataset,
batch_sampler=eval_sampler,
collate_fn=self.data_collator,
num_workers=self.args.dataloader_num_workers,
)

def get_test_dataloader(self, test_dataset: Dataset) -> DataLoader:
"""
@@ -1542,11 +1556,15 @@ def get_test_dataloader(self, test_dataset: Dataset) -> DataLoader:
if not self.args.should_load_dataset and test_dataset is not None:
raise ValueError("We don't need test_dataset when should_load_dataset is False.")

if self.args.distributed_dataloader:
is_iterable_dataset = self._is_iterable_dataset_dd(test_dataset)
else:
is_iterable_dataset = self._is_iterable_dataset(test_dataset)
if is_datasets_available() and test_dataset is not None and isinstance(test_dataset, datasets.Dataset):
test_dataset = self._remove_unused_columns(test_dataset, description="test")

if self._is_iterable_dataset(test_dataset):
if self.args.dataset_world_size > 1:
if is_iterable_dataset:
if self.args.dataset_world_size > 1 and test_dataset is not None:
test_dataset = IterableDatasetShard(
test_dataset,
batch_size=self.args.per_device_eval_batch_size,
@@ -1562,6 +1580,7 @@ def get_test_dataloader(self, test_dataset: Dataset) -> DataLoader:
collate_fn=self.data_collator, # _get_collator_with_removed_columns
num_workers=self.args.dataloader_num_workers,
eval=True,
is_iterable_dataset=True,
)
else:
return DataLoader(
@@ -1570,27 +1589,25 @@ def get_test_dataloader(self, test_dataset: Dataset) -> DataLoader:
collate_fn=self.data_collator, # _get_collator_with_removed_columns
num_workers=self.args.dataloader_num_workers,
)

test_sampler = self._get_eval_sampler(test_dataset)

if self.args.distributed_dataloader:
logger.info("Test using DistDataLoader.")

# We use the same batch_size as for eval.
return DistDataLoader(
test_dataset,
batch_sampler=test_sampler,
collate_fn=self.data_collator,
drop_last=self.args.dataloader_drop_last,
eval=True,
)
else:
return DataLoader(
test_dataset,
batch_sampler=test_sampler,
collate_fn=self.data_collator,
drop_last=self.args.dataloader_drop_last,
)
test_sampler = self._get_eval_sampler(test_dataset)
if self.args.distributed_dataloader:
logger.info("Test using DistDataLoader.")
# We use the same batch_size as for eval.
return DistDataLoader(
test_dataset,
batch_sampler=test_sampler,
collate_fn=self.data_collator,
drop_last=self.args.dataloader_drop_last,
eval=True,
)
else:
return DataLoader(
test_dataset,
batch_sampler=test_sampler,
collate_fn=self.data_collator,
drop_last=self.args.dataloader_drop_last,
)

def create_optimizer_and_scheduler(self, num_training_steps: int):
"""
@@ -1694,6 +1711,8 @@ def _load_rng_state(self, checkpoint):

if self.args.use_hybrid_parallel:
if "hybrid_parallel_rng_state_tracker" in checkpoint_rng_state:
if self.args.tensor_parallel_degree <= 1:
checkpoint_rng_state["hybrid_parallel_rng_state_tracker"].pop("model_parallel_rng", None)
Collaborator:
Please explain in the description why this triggers the hang.

Collaborator:
+1

Contributor Author:
This line does not trigger the hang; it just fixes a bug. If tensor parallelism is not used but the rng_state still contains the TP seed, loading it raises an error.

Contributor Author:
Explained in the PR description.

(A small self-contained illustration of this scenario follows this hunk.)

fleet.meta_parallel.get_rng_state_tracker().set_states_tracker(
checkpoint_rng_state["hybrid_parallel_rng_state_tracker"]
)
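
As noted above, a small self-contained illustration of the rng-state fix (the key name "model_parallel_rng" comes from the diff; the helper name and placeholder values are hypothetical): a checkpoint saved with tensor parallelism carries a model-parallel seed entry that a TP=1 run never registers, so it is dropped before restoring the tracker.

```python
def sanitize_hybrid_rng_state(tracker_state: dict, tensor_parallel_degree: int) -> dict:
    """Hypothetical helper mirroring the fix in _load_rng_state."""
    state = dict(tracker_state)
    if tensor_parallel_degree <= 1:
        # A TP=1 run never registers a model-parallel RNG, so restoring this
        # entry via set_states_tracker() would raise; drop it instead.
        state.pop("model_parallel_rng", None)
    return state


# Checkpoint saved with TP > 1, loaded into a TP = 1 run (values are opaque placeholders).
ckpt_state = {"model_parallel_rng": object(), "some_other_rng": object()}
clean_state = sanitize_hybrid_rng_state(ckpt_state, tensor_parallel_degree=1)
assert "model_parallel_rng" not in clean_state
```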
@@ -3201,6 +3220,15 @@ def _get_collator_with_removed_columns(
def _is_iterable_dataset(self, dataset):
return isinstance(dataset, paddle.io.IterableDataset)

def _is_iterable_dataset_dd(self, dataset):
# For distributed dataloaer.
is_iterable_dataset_tensor = paddle.to_tensor(self._is_iterable_dataset(dataset)).reshape([1])
if dist.get_world_size() > 1:
dist.all_reduce(is_iterable_dataset_tensor, op=dist.ReduceOp.MAX)
Contributor:
NPU does not support bool-type communication; this needs a compatible path.

(A sketch of an integer-flag variant follows this method.)

if is_iterable_dataset_tensor.item() == 1:
return True
return False
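
On the NPU comment above: one possible way to make the collective portable is to communicate an integer flag rather than a bool tensor. This is a hedged sketch of that idea, not code from this PR; it reuses the reviewer's suggested name `_is_iterable_dataset_distributed` as a free function.

```python
import paddle
import paddle.distributed as dist


def _is_iterable_dataset_distributed(dataset) -> bool:
    # Encode the local answer as an int32 flag so the all_reduce also works on
    # backends (e.g. NPU) that cannot communicate bool tensors.
    flag = paddle.to_tensor(
        [1 if isinstance(dataset, paddle.io.IterableDataset) else 0], dtype="int32"
    )
    if dist.get_world_size() > 1:
        # If any rank sees an iterable dataset, every rank treats it as iterable.
        dist.all_reduce(flag, op=dist.ReduceOp.MAX)
    return bool(flag.item())
```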

def print_config(self, args=None, key=""):
"""
print config values
22 changes: 2 additions & 20 deletions tests/trainer/test_unified_checkpoint.py
@@ -659,7 +659,7 @@ def setUp(self):
self.need_allclose = True
self.rtol = 1e-7

self.run_pretrain_file = "llm/llama/run_pretrain.py"
self.run_pretrain_file = "llm/run_pretrain.py"

def runfirst(self, train_args):
train_args["unified_checkpoint"] = 0
@@ -701,7 +701,7 @@ def setUp(self):
self.need_allclose = True
self.rtol = 1e-7

self.run_pretrain_file = "llm/llama/run_pretrain.py"
self.run_pretrain_file = "llm/run_pretrain.py"
self.filelists = [
"config.json",
"master_weights-00001-of-00002.safetensors",
@@ -1132,24 +1132,6 @@ def rerun(self, train_args):
np.testing.assert_allclose(res[0], res[-1], rtol=self.rtol)


@pytest.mark.skipif(True, reason="Skip for None CE")
Collaborator:
Why was this deleted?

Contributor Author:
If an ignore_merge_optimizer option is added later, it would conflict with skip_save_model_weight, so this test was removed.

class TestUnifiedCheckpointOnN1C8EnableAll(TestUnifiedCheckpointBase):
def setUp(self):
super().setUp()
for config_key in self.configs:
self.configs[config_key]["unified_checkpoint"] = 1
self.configs[config_key]["unified_checkpoint_config"] = "enable_all_options"

self.need_allclose = True
self.rtol = 1e-7

def runfirst(self, train_args):
self.run_n1c8(self.run_pretrain_file, **train_args)

def rerun(self, train_args):
self.run_n1c8(self.run_pretrain_file, **train_args)


@pytest.mark.skipif(True, reason="Skip for None CE")
class TestUnifiedCheckpointOnN1C8SaveLoadSpeed(TestUnifiedCheckpointFull):
def setUp(self):