From 7ad0286bdfc3c91f7b69c5c65d34f7ce839d79bb Mon Sep 17 00:00:00 2001
From: Mihir Patel
Date: Wed, 4 Oct 2023 15:51:33 -0400
Subject: [PATCH] deprecate

---
 composer/callbacks/mlperf.py                    |  2 ++
 composer/datasets/ade20k.py                     | 15 +++++++++++++++
 composer/datasets/brats.py                      |  3 +++
 composer/datasets/c4.py                         |  2 ++
 composer/datasets/cifar.py                      |  5 +++++
 composer/datasets/ffcv_utils.py                 |  3 +++
 composer/datasets/imagenet.py                   | 11 +++++++++++
 composer/datasets/lm_dataset.py                 |  3 +++
 composer/datasets/mnist.py                      |  3 +++
 composer/datasets/synthetic.py                  |  4 ++++
 composer/datasets/utils.py                      |  4 ++++
 composer/models/bert/model.py                   |  5 +++++
 composer/models/classify_mnist/model.py         |  3 +++
 composer/models/deeplabv3/model.py              |  4 ++++
 composer/models/efficientnetb0/efficientnets.py |  3 +++
 composer/models/efficientnetb0/model.py         |  4 ++++
 composer/models/gpt2/model.py                   |  3 +++
 composer/models/mmdetection.py                  |  2 ++
 composer/models/resnet/model.py                 |  2 ++
 composer/models/resnet_cifar/model.py           |  2 ++
 composer/models/timm/model.py                   |  2 ++
 composer/models/unet/model.py                   |  3 +++
 composer/models/unet/unet.py                    |  3 +++
 composer/models/vit_small_patch16/model.py      |  3 +++
 24 files changed, 94 insertions(+)

diff --git a/composer/callbacks/mlperf.py b/composer/callbacks/mlperf.py
index e5752c2a80..ddd2f02a76 100644
--- a/composer/callbacks/mlperf.py
+++ b/composer/callbacks/mlperf.py
@@ -267,6 +267,8 @@ def _get_dataloader_stats(self, dataloader: Iterable):
         # attempt to import ffcv and test if its an ffcv loader.
         import ffcv  # type: ignore
 
+        warnings.warn(DeprecationWarning('ffcv is deprecated and will be removed in v0.18'))
+
         if isinstance(dataloader, ffcv.loader.Loader):
             # Use the cached attribute ffcv.init_traversal_order to compute number of samples
             return (
diff --git a/composer/datasets/ade20k.py b/composer/datasets/ade20k.py
index 82603a59d2..23f801c485 100644
--- a/composer/datasets/ade20k.py
+++ b/composer/datasets/ade20k.py
@@ -8,6 +8,7 @@
 """
 
 import os
+import warnings
 from math import ceil
 from typing import Any, Dict, Optional, Tuple, Union
 
@@ -50,6 +51,8 @@ def build_ade20k_transformations(split,
         image_transforms (torch.nn.Module): Transformations to apply to the input image only.
         target_transforms (torch.nn.Module): Transformations to apply to the target semantic segmentation mask only.
     """
+    warnings.warn(DeprecationWarning('build_ade20k_transformations is deprecated and will be removed in v0.18'))
+
     if split == 'train':
         both_transforms = torch.nn.Sequential(
             RandomResizePair(
@@ -110,6 +113,8 @@ def build_ade20k_dataloader(
             Default: ``true``.
         **dataloader_kwargs (Dict[str, Any]): Additional settings for the dataloader (e.g. num_workers, etc.)
     """
+    warnings.warn(DeprecationWarning('build_ade20k_dataloader is deprecated and will be removed in v0.18'))
+
     if global_batch_size % dist.get_world_size() != 0:
         raise ValueError(
             f'global_batch_size ({global_batch_size}) must be divisible by world_size ({dist.get_world_size()}).')
@@ -195,6 +200,8 @@ def build_streaming_ade20k_dataloader(
             Defaults to ``None``, which is interpreted as the number of nodes of the initial run.
         **dataloader_kwargs (Dict[str, Any]): Additional settings for the dataloader (e.g. num_workers, etc.)
     """
+    warnings.warn(DeprecationWarning('build_streaming_ade20k_dataloader is deprecated and will be removed in v0.18'))
+
     if global_batch_size % dist.get_world_size() != 0:
         raise ValueError(
             f'global_batch_size ({global_batch_size}) must be divisible by world_size ({dist.get_world_size()}).')
@@ -274,6 +281,8 @@ def build_synthetic_ade20k_dataloader(
         memory_format (:class:`composer.core.MemoryFormat`): Memory format of the tensors. Default: ``CONTIGUOUS_FORMAT``.
         **dataloader_kwargs (Dict[str, Any]): Additional settings for the dataloader (e.g. num_workers, etc.)
     """
+    warnings.warn(DeprecationWarning('build_synthetic_ade20k_dataloader is deprecated and will be removed in v0.18'))
+
     if global_batch_size % dist.get_world_size() != 0:
         raise ValueError(
             f'global_batch_size ({global_batch_size}) must be divisible by world_size ({dist.get_world_size()}).')
@@ -321,6 +330,7 @@ def __init__(self, min_scale: float, max_scale: float, base_size: Optional[Tuple
         self.min_scale = min_scale
         self.max_scale = max_scale
         self.base_size = base_size
+        warnings.warn(DeprecationWarning('RandomResizePair is deprecated and will be removed in v0.18'))
 
     def forward(self, sample: Tuple[Image.Image, Image.Image]):
         image, target = sample
@@ -348,6 +358,7 @@ def __init__(self, crop_size: Tuple[int, int], class_max_percent: float = 1.0, n
         self.crop_size = crop_size
         self.class_max_percent = class_max_percent
         self.num_retry = num_retry
+        warnings.warn(DeprecationWarning('RandomCropPair is deprecated and will be removed in v0.18'))
 
     def forward(self, sample: Tuple[Image.Image, Image.Image]):
         image, target = sample
@@ -392,6 +403,7 @@ class RandomHFlipPair(torch.nn.Module):
     def __init__(self, probability: float = 0.5):
         super().__init__()
         self.probability = probability
+        warnings.warn(DeprecationWarning('RandomHFlipPair is deprecated and will be removed in v0.18'))
 
     def forward(self, sample: Tuple[Image.Image, Image.Image]):
         image, target = sample
@@ -413,6 +425,7 @@ def __init__(self, size: Tuple[int, int], fill: Union[int, Tuple[int, int, int]]
         super().__init__()
         self.size = size
         self.fill = fill
+        warnings.warn(DeprecationWarning('PadToSize is deprecated and will be removed in v0.18'))
 
     def forward(self, image: Image.Image):
         padding = max(self.size[0] - image.height, 0), max(self.size[1] - image.width, 0)
@@ -440,6 +453,7 @@ def __init__(self, brightness: float, contrast: float, saturation: float, hue: f
         self.contrast = contrast
         self.saturation = saturation
         self.hue = hue
+        warnings.warn(DeprecationWarning('PhotometricDistoration is deprecated and will be removed in v0.18'))
 
     def forward(self, image: Image.Image):
         if np.random.randint(2):
@@ -490,6 +504,7 @@ def __init__(self,
                  both_transforms: Optional[torch.nn.Module] = None,
                  image_transforms: Optional[torch.nn.Module] = None,
                  target_transforms: Optional[torch.nn.Module] = None):
+        warnings.warn(DeprecationWarning('ADE20k is deprecated and will be removed in v0.18'))
         super().__init__()
         self.datadir = datadir
         self.split = split
diff --git a/composer/datasets/brats.py b/composer/datasets/brats.py
index 096a4bd7e5..9356bdfb66 100644
--- a/composer/datasets/brats.py
+++ b/composer/datasets/brats.py
@@ -10,6 +10,7 @@
 import glob
 import os
 import random
+import warnings
 
 import numpy as np
 import torch
@@ -36,6 +37,8 @@ def build_brats_dataloader(datadir: str,
         global_batch_size (int): Global batch size.
         **dataloader_kwargs (Dict[str, Any]): Additional settings for the dataloader (e.g. num_workers, etc.)
     """
+    warnings.warn(DeprecationWarning('build_brats_dataloader is deprecated and will be removed in v0.18'))
+
     if global_batch_size % dist.get_world_size() != 0:
         raise ValueError(
             f'global_batch_size ({global_batch_size}) must be divisible by world_size ({dist.get_world_size()}).')
diff --git a/composer/datasets/c4.py b/composer/datasets/c4.py
index a9eb1d6e85..4402134a98 100644
--- a/composer/datasets/c4.py
+++ b/composer/datasets/c4.py
@@ -7,6 +7,7 @@
 `_ dataset.
 """
 import logging
+import warnings
 from typing import Any, Dict, Optional
 
 from torch.utils.data import DataLoader
@@ -74,6 +75,7 @@ def build_streaming_c4_dataloader(
             Defaults to ``None``, which is interpreted as the number of nodes of the initial run.
         **dataloader_kwargs (Dict[str, Any]): Additional settings for the dataloader (e.g. num_workers, etc.)
     """
+    warnings.warn(DeprecationWarning('build_streaming_c4_dataloader is deprecated and will be removed in v0.18'))
 
     try:
         import transformers
diff --git a/composer/datasets/cifar.py b/composer/datasets/cifar.py
index 3a2aad45fc..f866c5be1b 100644
--- a/composer/datasets/cifar.py
+++ b/composer/datasets/cifar.py
@@ -9,6 +9,7 @@
 
 import os
 import textwrap
+import warnings
 from typing import Any, Dict, List, Optional, Union
 
 import torch
@@ -52,6 +53,8 @@ def build_cifar10_dataloader(
         shuffle (bool): Shuffle the dataset. Default: ``True``.
         **dataloader_kwargs (Any): Additional settings for the dataloader (e.g. num_workers, etc.)
     """
+    warnings.warn(DeprecationWarning('build_cifar10_dataloader is deprecated and will be removed in v0.18'))
+
     if global_batch_size % dist.get_world_size() != 0:
         raise ValueError(
             f'global_batch_size ({global_batch_size}) must be divisible by world_size ({dist.get_world_size()}).')
@@ -119,6 +122,8 @@ def build_ffcv_cifar10_dataloader(
             ``False``.
         datadir (str | None, optional): Path to the non-FFCV data directory.
     """
+    warnings.warn(DeprecationWarning('build_ffcv_cifar10_dataloader is deprecated and will be removed in v0.18'))
+
     try:
         import ffcv
         from ffcv.fields.decoders import IntDecoder, SimpleRGBImageDecoder
diff --git a/composer/datasets/ffcv_utils.py b/composer/datasets/ffcv_utils.py
index 0a2bcc61dd..cdf7616d47 100644
--- a/composer/datasets/ffcv_utils.py
+++ b/composer/datasets/ffcv_utils.py
@@ -2,6 +2,7 @@
 # SPDX-License-Identifier: Apache-2.0
 
 import logging
+import warnings
 from typing import Optional
 
 import numpy as np
@@ -26,6 +27,7 @@ def _require_ffcv():
 
 
 def ffcv_monkey_patches():
+    warnings.warn(DeprecationWarning('ffcv_monkey_patches is deprecated and will be removed in v0.18'))
     _require_ffcv()
 
     # ffcv's __len__ function is expensive as it always calls self.next_traversal_order which does shuffling.
@@ -62,6 +64,7 @@ def write_ffcv_dataset(dataset: Optional[Dataset] = None,
         jpeg_quality (float): Quality to use for jpeg compression. Default: ``90``.
         chunk_size (int): Size of chunks processed by each worker during conversion. Default: ``100``.
     """
+    warnings.warn(DeprecationWarning('write_ffcv_dataset is deprecated and will be removed in v0.18'))
     _require_ffcv()
 
     if dataset is None:
diff --git a/composer/datasets/imagenet.py b/composer/datasets/imagenet.py
index 4533af208f..80d0f8c05d 100644
--- a/composer/datasets/imagenet.py
+++ b/composer/datasets/imagenet.py
@@ -8,6 +8,7 @@
 """
 
 import os
+import warnings
 from typing import Any, Dict, List, Optional
 
 import numpy as np
@@ -57,6 +58,8 @@ def build_imagenet_dataloader(
         crop size (int): The crop size to use. Default: ``224``.
         **dataloader_kwargs (Dict[str, Any]): Additional settings for the dataloader (e.g. num_workers, etc.)
     """
+    warnings.warn(DeprecationWarning('build_imagenet_dataloader is deprecated and will be removed in v0.18'))
+
     if global_batch_size % dist.get_world_size() != 0:
         raise ValueError(
             f'global_batch_size ({global_batch_size}) must be divisible by world_size ({dist.get_world_size()}).')
@@ -126,6 +129,8 @@ def build_synthetic_imagenet_dataloader(
         shuffle (bool): whether to shuffle the dataset. Default: ``True``.
         **dataloader_kwargs (Dict[str, Any]): Additional settings for the dataloader (e.g. num_workers, etc.)
     """
+    warnings.warn(DeprecationWarning('build_synthetic_imagenet_dataloader is deprecated and will be removed in v0.18'))
+
     if global_batch_size % dist.get_world_size() != 0:
         raise ValueError(
             f'global_batch_size ({global_batch_size}) must be divisible by world_size ({dist.get_world_size()}).')
@@ -166,6 +171,7 @@ def write_ffcv_imagenet(
         split (str): 'train' or 'val'. Default: ``train``.
         num_workers (int): Number of workers to use for conversion. Default: ``8``.
     """
+    warnings.warn(DeprecationWarning('write_ffcv_imagenet is deprecated and will be removed in v0.18'))
 
     if dist.get_local_rank() == 0:
         ds = ImageFolder(os.path.join(datadir, split))
@@ -205,6 +211,8 @@ def build_ffcv_imagenet_dataloader(
         prefetch_factor (int): Number of batches to prefect. Default: ``2``.
         num_workers (int): Number of workers. Default: ``8``.
     """
+    warnings.warn(DeprecationWarning('build_ffcv_imagenet_dataloader is deprecated and will be removed in v0.18'))
+
     try:
         import ffcv
         from ffcv.fields.decoders import CenterCropRGBImageDecoder, IntDecoder, RandomResizedCropRGBImageDecoder
@@ -317,6 +325,9 @@ def build_streaming_imagenet1k_dataloader(
             Defaults to ``None``, which is interpreted as the number of nodes of the initial run.
         **dataloader_kwargs (Dict[str, Any]): Additional settings for the dataloader (e.g. num_workers, etc.)
     """
+    warnings.warn(
+        DeprecationWarning('build_streaming_imagenet1k_dataloader is deprecated and will be removed in v0.18'))
+
     if global_batch_size % dist.get_world_size() != 0:
         raise ValueError(
             f'global_batch_size ({global_batch_size}) must be divisible by world_size ({dist.get_world_size()}).')
diff --git a/composer/datasets/lm_dataset.py b/composer/datasets/lm_dataset.py
index 1d2aed1ffc..aa752d0b2a 100644
--- a/composer/datasets/lm_dataset.py
+++ b/composer/datasets/lm_dataset.py
@@ -2,6 +2,7 @@
 # SPDX-License-Identifier: Apache-2.0
 
 import logging
+import warnings
 from typing import List, cast
 
 from torch.utils.data import DataLoader, Dataset
@@ -48,6 +49,8 @@ def build_lm_dataloader(
             ``1.0``.
         **dataloader_kwargs (Dict[str, Any]): Additional settings for the dataloader (e.g. num_workers, etc.)
     """
+    warnings.warn(DeprecationWarning('build_lm_dataloader is deprecated and will be removed in v0.18'))
+
     if global_batch_size % dist.get_world_size() != 0:
         raise ValueError(
             f'global_batch_size ({global_batch_size}) must be divisible by world_size ({dist.get_world_size()}).')
diff --git a/composer/datasets/mnist.py b/composer/datasets/mnist.py
index e54067cc39..4ec9601da4 100644
--- a/composer/datasets/mnist.py
+++ b/composer/datasets/mnist.py
@@ -1,6 +1,7 @@
 # Copyright 2022 MosaicML Composer authors
 # SPDX-License-Identifier: Apache-2.0
 
+import warnings
 from typing import Any
 
 from torch.utils.data import DataLoader
@@ -33,6 +34,8 @@ def build_mnist_dataloader(
         shuffle (bool): Shuffle the dataset. Default: ``True``.
         **dataloader_kwargs (Any): Additional settings for the dataloader (e.g. num_workers, etc.)
     """
+    warnings.warn(DeprecationWarning('build_mnist_dataloader is deprecated and will be removed in v0.18'))
+
     if global_batch_size % dist.get_world_size() != 0:
         raise ValueError(
             f'global_batch_size ({global_batch_size}) must be divisible by world_size ({dist.get_world_size()}).')
diff --git a/composer/datasets/synthetic.py b/composer/datasets/synthetic.py
index a660f85946..37d2e0f52c 100644
--- a/composer/datasets/synthetic.py
+++ b/composer/datasets/synthetic.py
@@ -5,6 +5,7 @@
 
 from __future__ import annotations
 
+import warnings
 from typing import Callable, Optional, Sequence, Union
 
 import torch
@@ -79,6 +80,8 @@ def __init__(self,
                  device: str = 'cpu',
                  memory_format: Union[str, MemoryFormat] = MemoryFormat.CONTIGUOUS_FORMAT,
                  transform: Optional[Callable] = None):
+        warnings.warn(DeprecationWarning('SyntheticBatchPairDataset is deprecated and will be removed in v0.18'))
+
         self.total_dataset_size = total_dataset_size
         self.data_shape = data_shape
         self.num_unique_samples_to_create = num_unique_samples_to_create
@@ -187,6 +190,7 @@ def __init__(self,
                  num_classes: Optional[int] = None,
                  label_shape: Optional[Sequence[int]] = None,
                  transform: Optional[Callable] = None):
+        warnings.warn(DeprecationWarning('SyntheticPILDataset is deprecated and will be removed in v0.18'))
         super().__init__(root='', transform=transform)
         self._dataset = SyntheticBatchPairDataset(
             total_dataset_size=total_dataset_size,
diff --git a/composer/datasets/utils.py b/composer/datasets/utils.py
index f873e5a860..9f6b2aac4e 100644
--- a/composer/datasets/utils.py
+++ b/composer/datasets/utils.py
@@ -5,6 +5,7 @@
 
 import logging
 import textwrap
+import warnings
 from typing import Callable, List, Tuple, Union
 
 import numpy as np
@@ -44,6 +45,7 @@ def __init__(self,
                  mean: Tuple[float, float, float],
                  std: Tuple[float, float, float],
                  ignore_background: bool = False):
+        warnings.warn(DeprecationWarning('NormalizationFn is deprecated and will be removed in v0.18'))
         self.mean = mean
         self.std = std
         self.ignore_background = ignore_background
@@ -87,6 +89,7 @@ def pil_image_collate(
         (torch.Tensor, torch.Tensor): Tuple of (image tensor, target tensor)
             The image tensor will be four-dimensional (NCHW or NHWC, depending on the ``memory_format``).
     """
+    warnings.warn(DeprecationWarning('pil_image_collate is deprecated and will be removed in v0.18'))
     imgs = [sample[0] for sample in batch]
     w, h = imgs[0].size
     image_tensor = torch.zeros((len(imgs), 3, h, w), dtype=torch.uint8).contiguous(memory_format=memory_format)
@@ -137,6 +140,7 @@ def add_vision_dataset_transform(dataset: VisionDataset, transform: Callable, is
     Returns:
         None: The ``dataset`` is modified in-place.
     """
+    warnings.warn(DeprecationWarning('add_vision_dataset_transform is deprecated and will be removed in v0.18'))
 
     transform_added_logstring = textwrap.dedent(f"""\
         Transform {transform} added to dataset.
diff --git a/composer/models/bert/model.py b/composer/models/bert/model.py
index 5e1a64edb8..7c79ef109b 100644
--- a/composer/models/bert/model.py
+++ b/composer/models/bert/model.py
@@ -5,6 +5,7 @@
 
 from __future__ import annotations
 
+import warnings
 from typing import Optional
 
 from torchmetrics import MeanSquaredError
@@ -70,6 +71,8 @@ def create_bert_mlm(use_pretrained: Optional[bool] = False,
 
         model = create_bert_mlm()
     """
+    warnings.warn(DeprecationWarning('create_bert_mlm is deprecated and will be removed in v0.18'))
+
     try:
         import transformers
     except ImportError as e:
@@ -175,6 +178,8 @@ def create_bert_classification(num_labels: int = 2,
     For the classification case (when ``num_labels > 1``), the training loss is :class:`~torch.nn.CrossEntropyLoss`, and the train/validation metrics are
     :class:`~torchmetrics.MulticlassAccuracy` and :class:`~torchmetrics.MatthewsCorrCoef`, as well as :class:`.BinaryF1Score` if ``num_labels == 2``.
     """
+    warnings.warn(DeprecationWarning('create_bert_classification is deprecated and will be removed in v0.18'))
+
     try:
         import transformers
     except ImportError as e:
diff --git a/composer/models/classify_mnist/model.py b/composer/models/classify_mnist/model.py
index e16af30916..708196af5b 100644
--- a/composer/models/classify_mnist/model.py
+++ b/composer/models/classify_mnist/model.py
@@ -3,6 +3,7 @@
 
 """A simple convolutional neural network extending :class:`.ComposerClassifier`."""
 
+import warnings
 from typing import List, Optional, Sequence, Union
 
 import torch
@@ -19,6 +20,7 @@ class Model(nn.Module):
     """Toy convolutional neural network architecture in pytorch for MNIST."""
 
     def __init__(self, initializers: Sequence[Union[str, Initializer]], num_classes: int = 10):
+        warnings.warn(DeprecationWarning('Model is deprecated and will be removed in v0.18'))
         super().__init__()
 
         self.num_classes = num_classes
@@ -65,6 +67,7 @@ def mnist_model(num_classes: int = 10, initializers: Optional[List[Initializer]]
 
         model = mnist_model()
     """
+    warnings.warn(DeprecationWarning('mnist_model is deprecated and will be removed in v0.18'))
     if initializers is None:
         initializers = []
 
diff --git a/composer/models/deeplabv3/model.py b/composer/models/deeplabv3/model.py
index 4d1ff6d591..7e58847708 100644
--- a/composer/models/deeplabv3/model.py
+++ b/composer/models/deeplabv3/model.py
@@ -28,6 +28,8 @@ class SimpleSegmentationModel(torch.nn.Module):
 
     def __init__(self, backbone, classifier):
+        warnings.warn(DeprecationWarning('SimpleSegmentationModel is deprecated and will be removed in v0.18'))
+
         super().__init__()
         self.backbone = backbone
         self.classifier = classifier
@@ -75,6 +77,7 @@ def deeplabv3(num_classes: int,
 
         pytorch_model = deeplabv3(num_classes=150, backbone_arch='resnet101', backbone_weights=None)
     """
+    warnings.warn(DeprecationWarning('deeplabv3 is deprecated and will be removed in v0.18'))
 
     # check that the specified architecture is in the resnet module
     if not hasattr(resnet, backbone_arch):
@@ -217,6 +220,7 @@ def composer_deeplabv3(num_classes: int,
 
         model = composer_deeplabv3(num_classes=150, backbone_arch='resnet101', backbone_weights=None)
     """
+    warnings.warn(DeprecationWarning('composer_deeplabv3 is deprecated and will be removed in v0.18'))
 
     model = deeplabv3(backbone_arch=backbone_arch,
                       backbone_weights=backbone_weights,
diff --git a/composer/models/efficientnetb0/efficientnets.py b/composer/models/efficientnetb0/efficientnets.py
index 2b4e6bb1a5..7c544a5143 100644
--- a/composer/models/efficientnetb0/efficientnets.py
+++ b/composer/models/efficientnetb0/efficientnets.py
@@ -8,6 +8,7 @@
 
 import math
 import re
+import warnings
 from typing import Callable, Optional
 
 import torch
@@ -59,6 +60,8 @@ def __init__(self,
                  act_layer: Callable[..., nn.Module] = nn.SiLU,
                  norm_kwargs: Optional[dict] = None,
                  norm_layer: Callable[..., nn.Module] = nn.BatchNorm2d):
+        warnings.warn(DeprecationWarning('EfficientNet is deprecated and will be removed in v0.18'))
+
         super(EfficientNet, self).__init__()
         self.num_classes = num_classes
 
diff --git a/composer/models/efficientnetb0/model.py b/composer/models/efficientnetb0/model.py
index b6f177ad85..67ae193895 100644
--- a/composer/models/efficientnetb0/model.py
+++ b/composer/models/efficientnetb0/model.py
@@ -2,6 +2,9 @@
 # SPDX-License-Identifier: Apache-2.0
 
 """A :class:`.ComposerClassifier` wrapper around the EfficientNet-b0 architecture."""
+
+import warnings
+
 from composer.models.efficientnetb0.efficientnets import EfficientNet
 from composer.models.tasks import ComposerClassifier
 
@@ -31,6 +34,7 @@ def composer_efficientnetb0(num_classes: int = 1000, drop_connect_rate: float =
 
         model = composer_efficientnetb0()  # creates EfficientNet-b0 for image classification
     """
+    warnings.warn(DeprecationWarning('composer_efficientnetb0 is deprecated and will be removed in v0.18'))
     model = EfficientNet.get_model_from_name(model_name='efficientnet-b0',
                                              num_classes=num_classes,
                                              drop_connect_rate=drop_connect_rate)
diff --git a/composer/models/gpt2/model.py b/composer/models/gpt2/model.py
index ebb2606baa..ea924b7b99 100644
--- a/composer/models/gpt2/model.py
+++ b/composer/models/gpt2/model.py
@@ -8,6 +8,7 @@
 
 from __future__ import annotations
 
+import warnings
 from typing import Optional
 
 from composer.metrics.nlp import LanguageCrossEntropy, LanguagePerplexity
@@ -83,6 +84,8 @@ def create_gpt2(use_pretrained: Optional[bool] = False,
 
         composer_model = create_gpt2()
     """
+    warnings.warn(DeprecationWarning('create_gpt2 is deprecated and will be removed in v0.18'))
+
     try:
         import transformers
     except ImportError as e:
diff --git a/composer/models/mmdetection.py b/composer/models/mmdetection.py
index 837b991cf3..2e53aac543 100644
--- a/composer/models/mmdetection.py
+++ b/composer/models/mmdetection.py
@@ -5,6 +5,7 @@
 
 from __future__ import annotations
 
+import warnings
 from typing import TYPE_CHECKING, Any, List, Optional
 
 import numpy as np
@@ -56,6 +57,7 @@ def __init__(
             self,
             model: mmdet.models.detectors.BaseDetector,  # type: ignore
             metrics: Optional[List[Metric]] = None) -> None:
+        warnings.warn(DeprecationWarning('MMDetModel is deprecated and will be removed in v0.18'))
         super().__init__()
         self.model = model
 
diff --git a/composer/models/resnet/model.py b/composer/models/resnet/model.py
index 621fe772b9..5b023fabcf 100644
--- a/composer/models/resnet/model.py
+++ b/composer/models/resnet/model.py
@@ -4,6 +4,7 @@
 """A :class:`.ComposerClassifier` wrapper around the torchvision implementations of the ResNet model family."""
 
 import logging
+import warnings
 from typing import List, Optional
 
 from torchmetrics import MetricCollection
@@ -57,6 +58,7 @@ def composer_resnet(model_name: str,
 
         model = composer_resnet(model_name='resnet18')  # creates a torchvision resnet18 for image classification
     """
+    warnings.warn(DeprecationWarning('composer_resnet is deprecated and will be removed in v0.18'))
     valid_model_names = ['resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152']
     if model_name not in valid_model_names:
diff --git a/composer/models/resnet_cifar/model.py b/composer/models/resnet_cifar/model.py
index 59e471d8bd..5bb8660b56 100644
--- a/composer/models/resnet_cifar/model.py
+++ b/composer/models/resnet_cifar/model.py
@@ -3,6 +3,7 @@
 
 """ResNet models for CIFAR extending :class:`.ComposerClassifier`."""
 
+import warnings
 from typing import List, Optional
 
 from composer.models.initializers import Initializer
@@ -39,6 +40,7 @@ def composer_resnet_cifar(model_name: str,
 
     .. _blog: https://myrtle.ai/learn/how-to-train-your-resnet-4-architecture/
     """
+    warnings.warn(DeprecationWarning('composer_resnet_cifar is deprecated and will be removed in v0.18'))
     if initializers is None:
         initializers = []
 
diff --git a/composer/models/timm/model.py b/composer/models/timm/model.py
index 8414646a9e..df0ffbca91 100644
--- a/composer/models/timm/model.py
+++ b/composer/models/timm/model.py
@@ -4,6 +4,7 @@
 """A wrapper around `timm.create_model() `_ used to create :class:`.ComposerClassifier`."""
 
+import warnings
 from typing import Optional
 
 from composer.models.tasks import ComposerClassifier
 
@@ -47,6 +48,7 @@ def composer_timm(model_name: str,
 
         model = composer_timm(model_name='resnet18')  # creates a timm resnet18
     """
+    warnings.warn(DeprecationWarning('composer_timm is deprecated and will be removed in v0.18'))
     try:
         import timm
     except ImportError as e:
diff --git a/composer/models/unet/model.py b/composer/models/unet/model.py
index 8b1c6a0c1e..08c49ff57c 100644
--- a/composer/models/unet/model.py
+++ b/composer/models/unet/model.py
@@ -6,6 +6,8 @@
 See the :doc:`Model Card ` for more details.
 """
 
+import warnings
+
 import torch.nn as nn
 
 from composer.models.unet._layers import ConvBlock, OutputBlock, ResidBlock, UpsampleBlock
@@ -40,6 +42,7 @@ def __init__(
         self,
         residual,
         dimension,
     ):
+        warnings.warn(DeprecationWarning('UNet is deprecated and will be removed in v0.18'))
         super(UNet, self).__init__()
         self.dim = dimension
         self.n_class = n_class
diff --git a/composer/models/unet/unet.py b/composer/models/unet/unet.py
index 80241ac351..dde555bb4f 100644
--- a/composer/models/unet/unet.py
+++ b/composer/models/unet/unet.py
@@ -4,6 +4,7 @@
 """A U-Net model extending :class:`.ComposerModel`."""
 
 import logging
+import warnings
 from typing import Any, Dict, Optional, Sequence, Union
 
 import torch
@@ -33,6 +34,8 @@ class UNet(ComposerModel):
     """
 
     def __init__(self, num_classes: int = 3) -> None:
+        warnings.warn(DeprecationWarning('UNet is deprecated and will be removed in v0.18'))
+
         super().__init__()
         try:
             from monai.losses import DiceLoss
diff --git a/composer/models/vit_small_patch16/model.py b/composer/models/vit_small_patch16/model.py
index 2bd6a8e6cd..dacb9db56a 100644
--- a/composer/models/vit_small_patch16/model.py
+++ b/composer/models/vit_small_patch16/model.py
@@ -3,6 +3,8 @@
 
 """Implements ViT-S/16 as a :class:`.ComposerClassifier`."""
 
+import warnings
+
 from composer.models.tasks import ComposerClassifier
 
 __all__ = ['vit_small_patch16']
@@ -29,6 +31,7 @@ def vit_small_patch16(num_classes: int = 1000,
     Returns:
         ComposerModel: instance of :class:`.ComposerClassifier` with a ViT-S/16 model.
     """
+    warnings.warn(DeprecationWarning('vit_small_patch16 is deprecated and will be removed in v0.18'))
    from vit_pytorch import ViT
 
    model = ViT(
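Usage note: every deprecation in this patch is raised through warnings.warn(DeprecationWarning(...)), so downstream code can manage the new warnings with Python's standard warnings filters. A minimal sketch follows; the message regex is only an assumption based on the strings added above, and Python ignores DeprecationWarning by default unless it is triggered in __main__, so scripts that want to see these warnings have to opt in.

    import warnings

    # Opt in to seeing the deprecations added in this patch.
    # (DeprecationWarning is hidden by default outside __main__.)
    warnings.filterwarnings(
        'default',
        message=r'.* is deprecated and will be removed in v0\.18',
        category=DeprecationWarning,
    )

    # Once call sites have migrated, the same filter with action='ignore' silences
    # only these messages instead of suppressing every DeprecationWarning.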