Version bump to 0.6.0rc1.dev0 #422

Closed · wants to merge 3 commits
2 changes: 2 additions & 0 deletions Dockerfile

@@ -115,6 +115,8 @@ RUN pip uninstall -y megatron-core && \
     fi && \
     pip install -e .
 
+RUN pip install --no-cache-dir lightning # can remove this when NEMO_TAG is bumped to include lightning install
+
 COPY --from=aligner-bump /opt/NeMo-Aligner /opt/NeMo-Aligner
 RUN cd /opt/NeMo-Aligner && \
     pip install --no-deps -e .
2 changes: 1 addition & 1 deletion examples/nlp/gpt/serve_reward_model.py

@@ -13,7 +13,7 @@
 # limitations under the License.
 
 import torch
-from pytorch_lightning.trainer.trainer import Trainer
+from lightning.pytorch.trainer.trainer import Trainer
 
 from nemo.collections.nlp.parts.nlp_overrides import NLPDDPStrategy
 from nemo.core.config import hydra_runner
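This one-line swap is the core change repeated across the PR: imports move from the legacy `pytorch_lightning` namespace to the unified `lightning.pytorch` namespace shipped by the standalone `lightning` package. For code that must run against either layout, a minimal compatibility shim could look like the sketch below; it is not part of this PR, just an illustration of the two import paths involved.

```python
# Sketch of a compatibility shim (not part of this PR): prefer the unified
# `lightning.pytorch` namespace, fall back to the legacy package name.
try:
    from lightning.pytorch.trainer.trainer import Trainer
except ImportError:
    from pytorch_lightning.trainer.trainer import Trainer
```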
2 changes: 1 addition & 1 deletion nemo_aligner/models/nlp/gpt/gpt_sft_model.py

@@ -16,10 +16,10 @@
 
 import hydra
 import torch
+from lightning.pytorch.trainer.trainer import Trainer
 from megatron.core.num_microbatches_calculator import get_micro_batch_size, get_num_microbatches
 from megatron.core.pipeline_parallel.schedules import get_forward_backward_func
 from omegaconf.dictconfig import DictConfig
-from pytorch_lightning.trainer.trainer import Trainer
 
 from nemo.collections.nlp.models.language_modeling.megatron_gpt_model import MegatronGPTModel
 from nemo.collections.nlp.modules.common.megatron.utils import get_iterator_k_split
2 changes: 1 addition & 1 deletion nemo_aligner/models/nlp/gpt/megatron_gpt_critic.py

@@ -15,11 +15,11 @@
 from enum import Enum
 
 import torch
+from lightning.pytorch.trainer.trainer import Trainer
 from megatron.core.num_microbatches_calculator import get_num_microbatches, reconfigure_num_microbatches_calculator
 from megatron.core.pipeline_parallel.schedules import get_forward_backward_func
 from megatron.core.transformer.module import Float16Module
 from omegaconf.dictconfig import DictConfig
-from pytorch_lightning.trainer.trainer import Trainer
 
 from nemo.collections.nlp.modules.common.megatron.utils import (
     average_losses_across_data_parallel_group,
2 changes: 1 addition & 1 deletion nemo_aligner/models/nlp/gpt/megatron_gpt_dpo_model.py

@@ -16,11 +16,11 @@
 from functools import partial
 
 import torch
+from lightning.pytorch.trainer.trainer import Trainer
 from megatron.core.num_microbatches_calculator import get_num_microbatches
 from megatron.core.pipeline_parallel.schedules import get_forward_backward_func
 from megatron.core.utils import divide
 from omegaconf.dictconfig import DictConfig
-from pytorch_lightning.trainer.trainer import Trainer
 
 from nemo.collections.nlp.models.language_modeling.megatron_gpt_model import MegatronGPTModel
 from nemo.collections.nlp.modules.common.megatron.utils import (
2 changes: 1 addition & 1 deletion nemo_aligner/models/nlp/gpt/megatron_gpt_kto_model.py

@@ -16,11 +16,11 @@
 from functools import partial
 
 import torch
+from lightning.pytorch.trainer.trainer import Trainer
 from megatron.core.num_microbatches_calculator import get_num_microbatches
 from megatron.core.pipeline_parallel.schedules import get_forward_backward_func
 from megatron.core.utils import divide
 from omegaconf.dictconfig import DictConfig
-from pytorch_lightning.trainer.trainer import Trainer
 
 from nemo.collections.nlp.models.language_modeling.megatron_gpt_model import MegatronGPTModel
 from nemo.collections.nlp.modules.common.megatron.utils import (
2 changes: 1 addition & 1 deletion nemo_aligner/models/nlp/gpt/megatron_gpt_ppo_actor.py

@@ -16,12 +16,12 @@
 
 import torch
 import torch.distributed
+from lightning.pytorch.trainer.trainer import Trainer
 from megatron.core.num_microbatches_calculator import get_num_microbatches
 from megatron.core.pipeline_parallel.schedules import get_forward_backward_func
 from megatron.core.utils import divide
 from omegaconf import OmegaConf
 from omegaconf.dictconfig import DictConfig
-from pytorch_lightning.trainer.trainer import Trainer
 
 from nemo.collections.nlp.models.language_modeling.megatron_gpt_model import MegatronGPTModel
 from nemo.collections.nlp.modules.common.megatron.utils import (
2 changes: 1 addition & 1 deletion …

@@ -14,10 +14,10 @@
 
 
 import torch
+from lightning.pytorch.trainer.trainer import Trainer
 from megatron.core.num_microbatches_calculator import get_num_microbatches
 from megatron.core.pipeline_parallel.schedules import get_forward_backward_func
 from omegaconf.dictconfig import DictConfig
-from pytorch_lightning.trainer.trainer import Trainer
 
 from nemo.collections.nlp.modules.common.megatron.utils import (
     average_losses_across_data_parallel_group,
2 changes: 1 addition & 1 deletion nemo_aligner/models/nlp/gpt/megatron_gpt_reward_model.py

@@ -17,11 +17,11 @@
 from typing import List, Tuple, Union
 
 import torch
+from lightning.pytorch.trainer.trainer import Trainer
 from megatron.core.num_microbatches_calculator import get_num_microbatches
 from megatron.core.pipeline_parallel.schedules import get_forward_backward_func
 from megatron.core.utils import divide
 from omegaconf.dictconfig import DictConfig
-from pytorch_lightning.trainer.trainer import Trainer
 
 from nemo.collections.nlp.models.language_modeling.megatron_gpt_model import MegatronGPTModel, get_specs
 from nemo.collections.nlp.modules.common.megatron.utils import (
2 changes: 1 addition & 1 deletion nemo_aligner/models/nlp/gpt/megatron_gpt_rs_actor.py

@@ -15,13 +15,13 @@
 from contextlib import nullcontext
 
 import torch
+from lightning.pytorch.trainer.trainer import Trainer
 from megatron.core import parallel_state
 from megatron.core.num_microbatches_calculator import get_num_microbatches
 from megatron.core.pipeline_parallel.schedules import get_forward_backward_func
 from megatron.core.utils import divide
 from omegaconf import OmegaConf
 from omegaconf.dictconfig import DictConfig
-from pytorch_lightning.trainer.trainer import Trainer
 
 from nemo.collections.nlp.models.language_modeling.megatron_gpt_model import MegatronGPTModel
 from nemo.collections.nlp.modules.common.megatron.utils import (
2 changes: 1 addition & 1 deletion nemo_aligner/models/nlp/gpt/megatron_gpt_spin_model.py

@@ -16,11 +16,11 @@
 from contextlib import nullcontext
 
 import torch
+from lightning.pytorch.trainer.trainer import Trainer
 from megatron.core.num_microbatches_calculator import get_num_microbatches
 from megatron.core.pipeline_parallel.schedules import get_forward_backward_func
 from omegaconf import OmegaConf
 from omegaconf.dictconfig import DictConfig
-from pytorch_lightning.trainer.trainer import Trainer
 
 from nemo.collections.nlp.models.language_modeling.megatron_gpt_model import MegatronGPTModel
 from nemo.collections.nlp.modules.common.megatron.utils import (
2 changes: 1 addition & 1 deletion nemo_aligner/package_info.py

@@ -16,7 +16,7 @@
 MAJOR = 0
 MINOR = 6
 PATCH = 0
-PRE_RELEASE = "rc0"
+PRE_RELEASE = "rc1"
 DEV = "dev0"
 
 # Use the following formatting: (major, minor, patch, pre-release, dev)
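These constants compose into the version string in the PR title. A minimal sketch of that composition (assuming plain concatenation; the actual assembly code in package_info.py is not shown in this diff):

```python
# Sketch, assuming the version string is built by simple concatenation;
# the real assembly code in package_info.py is not shown in this diff.
MAJOR, MINOR, PATCH = 0, 6, 0
PRE_RELEASE, DEV = "rc1", "dev0"
__version__ = f"{MAJOR}.{MINOR}.{PATCH}{PRE_RELEASE}.{DEV}"
assert __version__ == "0.6.0rc1.dev0"  # matches the PR title
```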
4 changes: 2 additions & 2 deletions nemo_aligner/utils/train_script_utils.py

@@ -17,10 +17,10 @@
 from dataclasses import dataclass
 from functools import partial
 
+from lightning.pytorch.trainer import call
+from lightning.pytorch.trainer.states import TrainerFn
 from omegaconf import open_dict
 from omegaconf.omegaconf import OmegaConf
-from pytorch_lightning.trainer import call
-from pytorch_lightning.trainer.states import TrainerFn
 
 from nemo.collections.nlp.parts.megatron_trainer_builder import MegatronTrainerBuilder
 from nemo.collections.nlp.parts.peft_config import PEFT_CONFIG_MAP
1 change: 1 addition & 0 deletions setup/requirements.txt

@@ -1,4 +1,5 @@
 jsonlines
+lightning>2.2.1
 megatron_core>=0.8
 nemo_toolkit[nlp]
 nvidia-pytriton
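Note that `lightning>2.2.1` is a strict lower bound: it excludes version 2.2.1 itself. A quick runtime check of the installed version could look like the sketch below (an illustration only; it assumes the `packaging` helper library is available).

```python
# Sketch: confirm the installed lightning satisfies the new strict bound.
# Assumes the `packaging` library is available in the environment.
from importlib.metadata import version

from packaging.version import Version

assert Version(version("lightning")) > Version("2.2.1")
```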
2 changes: 1 addition & 1 deletion tests/conftest.py

@@ -15,8 +15,8 @@
 import os
 
 import pytest
+from lightning.pytorch import Trainer
 from omegaconf import DictConfig
-from pytorch_lightning import Trainer
 
 from nemo.collections.common.tokenizers.huggingface.auto_tokenizer import AutoTokenizer
 from nemo.collections.nlp.models.language_modeling.megatron_gpt_model import MegatronGPTModel