train.py (forked from mosaicml/llm-foundry) · 326 lines (280 loc) · 12.3 KB
# Copyright 2022 MosaicML LLM Foundry authors
# SPDX-License-Identifier: Apache-2.0
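"""Train an LLM Foundry model.

Builds the tokenizer, model (optionally with LoRA adapters), dataloaders,
optimizer, and Composer Trainer from a YAML config merged with CLI overrides,
then runs training.
"""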
import contextlib
import os
import sys
import warnings
from composer import Trainer
from composer.core import Evaluator
from composer.utils import dist, get_device, reproducibility
from omegaconf import DictConfig
from omegaconf import OmegaConf as om
from transformers import PreTrainedTokenizer
from llmfoundry import (COMPOSER_MODEL_REGISTRY, ComposerHFCausalLM,
MPTForCausalLM, build_finetuning_dataloader,
build_text_denoising_dataloader)
from llmfoundry.data.text_data import build_text_dataloader
from llmfoundry.models.utils import init_empty_weights
from llmfoundry.utils.builders import (build_algorithm, build_callback,
build_icl_evaluators, build_logger,
build_optimizer, build_scheduler,
build_tokenizer)
from llmfoundry.utils.config_utils import log_config, update_batch_size_info
def validate_config(cfg):
"""Validates compatible model and dataloader selection."""
loaders = [cfg.train_loader]
if 'eval_loader' in cfg:
loaders.append(cfg.eval_loader)
for loader in loaders:
if loader.name == 'text':
if cfg.model.name in ['hf_prefix_lm', 'hf_t5']:
raise ValueError(
f'Model type "{cfg.model.name}" is not supported when using the "text " ' +\
f'dataloader. Please use the "text_denoising" dataloader to pre-train that model type.')
elif loader.name == 'text_denoising':
if cfg.model.name == 'hf_causal_lm':
raise ValueError(
f'Model type "{cfg.model.name}" is not supported when using the "text_denoising" ' +\
f'dataloader. Please use the "text" dataloader to pre-train that model type.')
if loader.mixture_of_denoisers.decoder_only_format and cfg.model.name == 'hf_t5':
warnings.warn(
'Model type "hf_t5" requires `decoder_only_format` to be ``False``. ' +\
'Overriding `decoder_only_format` from ``True`` to ``False``.')
loader.mixture_of_denoisers.decoder_only_format = False
if (not loader.mixture_of_denoisers.decoder_only_format
) and cfg.model.name == 'hf_prefix_lm':
warnings.warn(
'Model type "hf_prefix_lm" requires `decoder_only_format` to be ``True``. ' +\
'Overriding `decoder_only_format` from ``False`` to ``True``.')
loader.mixture_of_denoisers.decoder_only_format = True
if 'icl_tasks' in cfg:
if cfg.model.name == 'hf_t5':
raise ValueError(
'ICL evaluation does not currently support Encoder-Decoder models, such as "hf_t5".'
)
def build_composer_model(model_cfg, tokenizer):
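    """Build a Composer model by looking up ``model_cfg.name`` in COMPOSER_MODEL_REGISTRY."""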
warnings.filterwarnings(
action='ignore',
message='Torchmetrics v0.9 introduced a new argument class property')
if model_cfg.name not in COMPOSER_MODEL_REGISTRY:
raise ValueError(
f'Not sure how to build model with name={model_cfg.name}')
return COMPOSER_MODEL_REGISTRY[model_cfg.name](model_cfg, tokenizer)
def build_composer_peft_model(
model_cfg: DictConfig, lora_cfg: DictConfig,
tokenizer: PreTrainedTokenizer) -> ComposerHFCausalLM:
try:
from peft import LoraConfig, get_peft_model
except ImportError as e:
raise ImportError(
'Error importing from peft. Please verify that peft and peft utils '
'are installed by running `pip install -e .[peft]` from `llm-foundry/`.'
f'Error encountered: {e}')
# 1) loads a hf model, 2) adds peft modules, 3) wraps it in a ComposerHFCausalLM.
print('Building Lora config...')
lora_cfg = LoraConfig(**lora_cfg.args)
print('Building model from HuggingFace checkpoint...')
model = MPTForCausalLM.from_pretrained(
        model_cfg.pretrained_model_name_or_path, trust_remote_code=True)
print('Model built!')
print('Adding Lora modules...')
model = get_peft_model(model, lora_cfg)
print('Lora modules added!')
model = ComposerHFCausalLM(model, tokenizer)
return model
def print_trainable_parameters(model) -> None:
# Prints the number of trainable parameters in the model.
trainable_params = 0
all_param = 0
for _, param in model.named_parameters():
all_param += param.numel()
if param.requires_grad:
trainable_params += param.numel()
print(
f'trainable params: {trainable_params} || all params: {all_param} || trainable%: {100 * trainable_params / all_param}'
)
def build_dataloader(cfg, tokenizer, device_batch_size):
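    """Dispatch to the ``text``, ``text_denoising``, or ``finetuning`` dataloader builder based on ``cfg.name``."""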
if cfg.name == 'text':
return build_text_dataloader(
cfg,
tokenizer,
device_batch_size,
)
elif cfg.name == 'text_denoising':
return build_text_denoising_dataloader(
cfg,
tokenizer,
device_batch_size,
)
elif cfg.name == 'finetuning':
return build_finetuning_dataloader(
cfg,
tokenizer,
device_batch_size,
)
else:
raise ValueError(f'Not sure how to build dataloader with config: {cfg}')
def main(cfg):
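    """Validate the config, build the model, tokenizer, and dataloaders, then
    construct the Composer Trainer and run training."""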
# Check for incompatibilities between the model and data loaders
validate_config(cfg)
# Filter deprecation warning from torch internal usage
warnings.filterwarnings(
action='ignore',
category=UserWarning,
message=
        'torch.distributed.*_base is a private function and will be deprecated.*'
)
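    # Set the default distributed timeout, seed all RNGs, and initialize torch.distributed.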
cfg.dist_timeout = cfg.get('dist_timeout', 600.0)
reproducibility.seed_all(cfg.seed)
dist.initialize_dist(get_device(None), timeout=cfg.dist_timeout)
# Run Name
if cfg.get('run_name') is None:
cfg.run_name = os.environ.get('RUN_NAME', 'llm')
# Get batch size info
cfg = update_batch_size_info(cfg)
# Read FSDP Config as a dict
fsdp_config = cfg.get('fsdp_config', None)
fsdp_config = om.to_container(fsdp_config,
resolve=True) if fsdp_config else None
if dist.get_world_size() == 1 and fsdp_config is not None:
warnings.warn(
'FSDP is not applicable for single-GPU training. Reverting to DDP.')
cfg.pop('fsdp_config')
fsdp_config = None
# Restrict model init_device to 'meta' and 'cpu',
# using 'cuda' vs. 'cuda:id' is tricky and can lead to common user errors
# when multiple GPUs are available.
# Also 'meta' is only valid when using FSDP
init_context = contextlib.nullcontext()
if 'init_device' in cfg.model:
assert cfg.model.init_device in ['meta', 'cpu', 'mixed']
if fsdp_config is None and cfg.model.init_device == 'meta':
warnings.warn(
"Using `cfg.model.init_device='meta'` is only valid when using FSDP! " +\
"Reverting to `cfg.model.init_device='cpu'`.")
cfg.model.init_device = 'cpu'
if cfg.model.init_device == 'meta':
init_context = init_empty_weights()
if cfg.model.init_device == 'mixed':
if fsdp_config is None:
raise NotImplementedError(
'Using init_device `mixed` is only supported with FSDP. '
'Please add a FSDP config.')
# Always set `sync_module_states` to True for mixed initialization
if not fsdp_config.get('sync_module_states', False):
warnings.warn((
'Setting `sync_module_states = True` for FSDP. This is required '
'when using mixed initialization.'))
fsdp_config['sync_module_states'] = True
# Set defaults for mixed initialization
fsdp_config.setdefault('use_orig_params', False)
fsdp_config.setdefault('load_monolith_rank0_only', True)
# build tokenizer
tokenizer = build_tokenizer(cfg.tokenizer)
# Build Model
print('Initializing model...')
with init_context:
if cfg.get('lora',
None) is not None: # frozen model + trainable lora modules
model: ComposerHFCausalLM = build_composer_peft_model(
cfg.model, cfg.lora, tokenizer)
print_trainable_parameters(model) # should not be 100%
else: # standard model
model = build_composer_model(cfg.model, tokenizer)
cfg.n_params = sum(p.numel() for p in model.parameters())
print(f'{cfg.n_params=:.2e}')
# Dataloaders
print('Building train loader...')
train_loader = build_dataloader(
cfg.train_loader,
tokenizer,
cfg.device_train_batch_size,
)
print('Building eval loader...')
evaluators = []
if 'eval_loader' in cfg:
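        # Wrap the eval dataloader in an Evaluator; metric names are taken from the model's train metrics.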
eval_loader = Evaluator(label='eval',
dataloader=build_dataloader(
cfg.eval_loader, tokenizer,
cfg.device_eval_batch_size),
metric_names=list(model.train_metrics.keys()))
evaluators.append(eval_loader)
if 'icl_tasks' in cfg:
icl_evaluators, _ = build_icl_evaluators(cfg.icl_tasks, tokenizer,
cfg.max_seq_len,
cfg.device_eval_batch_size)
evaluators.extend(icl_evaluators)
# Optimizer
optimizer = build_optimizer(cfg.optimizer, model)
# Scheduler
scheduler = build_scheduler(cfg.scheduler)
# Loggers
loggers = [
build_logger(name, logger_cfg)
for name, logger_cfg in (cfg.get('loggers') or {}).items()
]
# Callbacks
callbacks = [
build_callback(name, callback_cfg)
for name, callback_cfg in (cfg.get('callbacks') or {}).items()
]
# Algorithms
algorithms = [
build_algorithm(name, algorithm_cfg)
for name, algorithm_cfg in (cfg.get('algorithms') or {}).items()
]
# Build the Trainer
print('Building trainer...')
trainer = Trainer(
run_name=cfg.run_name,
seed=cfg.seed,
model=model,
train_dataloader=train_loader,
eval_dataloader=evaluators,
optimizers=optimizer,
schedulers=scheduler,
max_duration=cfg.max_duration,
eval_interval=cfg.eval_interval,
eval_subset_num_batches=cfg.get('eval_subset_num_batches', -1),
progress_bar=cfg.get('progress_bar', False),
log_to_console=cfg.get('log_to_console', True),
console_log_interval=cfg.get('console_log_interval', '1ba'),
loggers=loggers,
callbacks=callbacks,
precision=cfg.precision,
algorithms=algorithms,
device_train_microbatch_size=cfg.get('device_train_microbatch_size',
'auto'),
fsdp_config=fsdp_config, # type: ignore
save_folder=cfg.get('save_folder', None),
save_filename=cfg.get('save_filename',
'ep{epoch}-ba{batch}-rank{rank}.pt'),
save_latest_filename=cfg.get('save_latest_filename',
'latest-rank{rank}.pt'),
save_interval=cfg.get('save_interval', '1000ba'),
save_num_checkpoints_to_keep=cfg.get('save_num_checkpoints_to_keep',
-1),
save_overwrite=cfg.get('save_overwrite', False),
save_weights_only=cfg.get('save_weights_only', False),
load_path=cfg.get('load_path', None),
load_weights_only=cfg.get('load_weights_only', False),
load_ignore_keys=cfg.get('load_ignore_keys', None),
autoresume=cfg.get('autoresume', False),
python_log_level=cfg.get('python_log_level', 'debug'),
dist_timeout=cfg.dist_timeout,
)
print('Logging config...')
log_config(cfg)
if cfg.get('eval_first',
False) and trainer.state.timestamp.batch.value == 0:
trainer.eval()
print('Starting training...')
trainer.fit()
print('Done.')
if __name__ == '__main__':
yaml_path, args_list = sys.argv[1], sys.argv[2:]
with open(yaml_path) as f:
yaml_cfg = om.load(f)
cli_cfg = om.from_cli(args_list)
cfg = om.merge(yaml_cfg, cli_cfg)
main(cfg)
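
# Example invocation (the YAML path below is hypothetical; any `key=value`
# arguments after the YAML are merged as OmegaConf CLI overrides):
#   composer train.py yamls/pretrain/mpt-125m.yaml max_duration=10ba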