Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

can't run with cpu or mps #6

Open
i18nsite opened this issue Nov 17, 2024 · 0 comments
Open

can't run with cpu or mps #6

i18nsite opened this issue Nov 17, 2024 · 0 comments

Comments

@i18nsite
Copy link

i18nsite commented Nov 17, 2024

/Users/z/.local/share/mise/installs/python/3.12.7/lib/python3.12/site-packages/bitsandbytes/cextension.py:34: UserWarning: The installed version of bitsandbytes was compiled without GPU support. 8-bit optimizers, 8-bit multiplication, and GPU quantization are unavailable.
  warn("The installed version of bitsandbytes was compiled without GPU support. "
/Users/z/.local/share/mise/installs/python/3.12.7/lib/python3.12/site-packages/mmengine/optim/optimizer/zero_optimizer.py:11: DeprecationWarning: `TorchScript` support for functional optimizers is deprecated and will be removed in a future PyTorch release. Consider using the `torch.compile` optimizer instead.
  from torch.distributed.optim import \
/Users/z/.local/share/mise/installs/python/3.12.7/lib/python3.12/site-packages/torch/functional.py:534: UserWarning: torch.meshgrid: in an upcoming release, it will be required to pass the indexing argument. (Triggered internally at /Users/runner/work/pytorch/pytorch/pytorch/aten/src/ATen/native/TensorShape.cpp:3596.)
  return _VF.meshgrid(tensors, **kwargs)  # type: ignore[attr-defined]
'NoneType' object has no attribute 'cadam32bit_grad_fp32'
cpu
Linear(in_features=512, out_features=512, bias=True)
Linear(in_features=512, out_features=512, bias=True)
Linear(in_features=512, out_features=512, bias=True)
Dropout(p=0.0, inplace=False)
Dropout(p=0.0, inplace=False)
Linear(in_features=512, out_features=512, bias=True)
Conv2d(512, 512, kernel_size=(4, 4), stride=(4, 4), groups=512)
SparseWindowAttention(
  (key): Linear(in_features=512, out_features=512, bias=True)
  (query): Linear(in_features=512, out_features=512, bias=True)
  (value): Linear(in_features=512, out_features=512, bias=True)
  (attn_drop): Dropout(p=0.0, inplace=False)
  (proj_drop): Dropout(p=0.0, inplace=False)
  (proj): Linear(in_features=512, out_features=512, bias=True)
  (pool_layer): Conv2d(512, 512, kernel_size=(4, 4), stride=(4, 4), groups=512)
)
LayerNorm((512,), eps=1e-05, elementwise_affine=True)
LayerNorm((512,), eps=1e-05, elementwise_affine=True)
Linear(in_features=512, out_features=1000, bias=True)
Sequential(
  (0): Linear(in_features=512, out_features=1000, bias=True)
)
GELU(approximate='none')
Linear(in_features=1000, out_features=512, bias=True)
Sequential(
  (0): GELU(approximate='none')
  (1): Linear(in_features=1000, out_features=512, bias=True)
)
FusionFeedForward(
  (fc1): Sequential(
    (0): Linear(in_features=512, out_features=1000, bias=True)
  )
  (fc2): Sequential(
    (0): GELU(approximate='none')
    (1): Linear(in_features=1000, out_features=512, bias=True)
  )
)
TemporalSparseTransformer(
  (attention): SparseWindowAttention(
    (key): Linear(in_features=512, out_features=512, bias=True)
    (query): Linear(in_features=512, out_features=512, bias=True)
    (value): Linear(in_features=512, out_features=512, bias=True)
    (attn_drop): Dropout(p=0.0, inplace=False)
    (proj_drop): Dropout(p=0.0, inplace=False)
    (proj): Linear(in_features=512, out_features=512, bias=True)
    (pool_layer): Conv2d(512, 512, kernel_size=(4, 4), stride=(4, 4), groups=512)
  )
  (norm1): LayerNorm((512,), eps=1e-05, elementwise_affine=True)
  (norm2): LayerNorm((512,), eps=1e-05, elementwise_affine=True)
  (mlp): FusionFeedForward(
    (fc1): Sequential(
      (0): Linear(in_features=512, out_features=1000, bias=True)
    )
    (fc2): Sequential(
      (0): GELU(approximate='none')
      (1): Linear(in_features=1000, out_features=512, bias=True)
    )
  )
)
Linear(in_features=512, out_features=512, bias=True)
Linear(in_features=512, out_features=512, bias=True)
Linear(in_features=512, out_features=512, bias=True)
Dropout(p=0.0, inplace=False)
Dropout(p=0.0, inplace=False)
Linear(in_features=512, out_features=512, bias=True)
Conv2d(512, 512, kernel_size=(4, 4), stride=(4, 4), groups=512)
SparseWindowAttention(
  (key): Linear(in_features=512, out_features=512, bias=True)
  (query): Linear(in_features=512, out_features=512, bias=True)
  (value): Linear(in_features=512, out_features=512, bias=True)
  (attn_drop): Dropout(p=0.0, inplace=False)
  (proj_drop): Dropout(p=0.0, inplace=False)
  (proj): Linear(in_features=512, out_features=512, bias=True)
  (pool_layer): Conv2d(512, 512, kernel_size=(4, 4), stride=(4, 4), groups=512)
)
LayerNorm((512,), eps=1e-05, elementwise_affine=True)
LayerNorm((512,), eps=1e-05, elementwise_affine=True)
Linear(in_features=512, out_features=1000, bias=True)
Sequential(
  (0): Linear(in_features=512, out_features=1000, bias=True)
)
GELU(approximate='none')
Linear(in_features=1000, out_features=512, bias=True)
Sequential(
  (0): GELU(approximate='none')
  (1): Linear(in_features=1000, out_features=512, bias=True)
)
FusionFeedForward(
  (fc1): Sequential(
    (0): Linear(in_features=512, out_features=1000, bias=True)
  )
  (fc2): Sequential(
    (0): GELU(approximate='none')
    (1): Linear(in_features=1000, out_features=512, bias=True)
  )
)
TemporalSparseTransformer(
  (attention): SparseWindowAttention(
    (key): Linear(in_features=512, out_features=512, bias=True)
    (query): Linear(in_features=512, out_features=512, bias=True)
    (value): Linear(in_features=512, out_features=512, bias=True)
    (attn_drop): Dropout(p=0.0, inplace=False)
    (proj_drop): Dropout(p=0.0, inplace=False)
    (proj): Linear(in_features=512, out_features=512, bias=True)
    (pool_layer): Conv2d(512, 512, kernel_size=(4, 4), stride=(4, 4), groups=512)
  )
  (norm1): LayerNorm((512,), eps=1e-05, elementwise_affine=True)
  (norm2): LayerNorm((512,), eps=1e-05, elementwise_affine=True)
  (mlp): FusionFeedForward(
    (fc1): Sequential(
      (0): Linear(in_features=512, out_features=1000, bias=True)
    )
    (fc2): Sequential(
      (0): GELU(approximate='none')
      (1): Linear(in_features=1000, out_features=512, bias=True)
    )
  )
)
Linear(in_features=512, out_features=512, bias=True)
Linear(in_features=512, out_features=512, bias=True)
Linear(in_features=512, out_features=512, bias=True)
Dropout(p=0.0, inplace=False)
Dropout(p=0.0, inplace=False)
Linear(in_features=512, out_features=512, bias=True)
Conv2d(512, 512, kernel_size=(4, 4), stride=(4, 4), groups=512)
SparseWindowAttention(
  (key): Linear(in_features=512, out_features=512, bias=True)
  (query): Linear(in_features=512, out_features=512, bias=True)
  (value): Linear(in_features=512, out_features=512, bias=True)
  (attn_drop): Dropout(p=0.0, inplace=False)
  (proj_drop): Dropout(p=0.0, inplace=False)
  (proj): Linear(in_features=512, out_features=512, bias=True)
  (pool_layer): Conv2d(512, 512, kernel_size=(4, 4), stride=(4, 4), groups=512)
)
LayerNorm((512,), eps=1e-05, elementwise_affine=True)
LayerNorm((512,), eps=1e-05, elementwise_affine=True)
Linear(in_features=512, out_features=1000, bias=True)
Sequential(
  (0): Linear(in_features=512, out_features=1000, bias=True)
)
GELU(approximate='none')
Linear(in_features=1000, out_features=512, bias=True)
Sequential(
  (0): GELU(approximate='none')
  (1): Linear(in_features=1000, out_features=512, bias=True)
)
FusionFeedForward(
  (fc1): Sequential(
    (0): Linear(in_features=512, out_features=1000, bias=True)
  )
  (fc2): Sequential(
    (0): GELU(approximate='none')
    (1): Linear(in_features=1000, out_features=512, bias=True)
  )
)
TemporalSparseTransformer(
  (attention): SparseWindowAttention(
    (key): Linear(in_features=512, out_features=512, bias=True)
    (query): Linear(in_features=512, out_features=512, bias=True)
    (value): Linear(in_features=512, out_features=512, bias=True)
    (attn_drop): Dropout(p=0.0, inplace=False)
    (proj_drop): Dropout(p=0.0, inplace=False)
    (proj): Linear(in_features=512, out_features=512, bias=True)
    (pool_layer): Conv2d(512, 512, kernel_size=(4, 4), stride=(4, 4), groups=512)
  )
  (norm1): LayerNorm((512,), eps=1e-05, elementwise_affine=True)
  (norm2): LayerNorm((512,), eps=1e-05, elementwise_affine=True)
  (mlp): FusionFeedForward(
    (fc1): Sequential(
      (0): Linear(in_features=512, out_features=1000, bias=True)
    )
    (fc2): Sequential(
      (0): GELU(approximate='none')
      (1): Linear(in_features=1000, out_features=512, bias=True)
    )
  )
)
Linear(in_features=512, out_features=512, bias=True)
Linear(in_features=512, out_features=512, bias=True)
Linear(in_features=512, out_features=512, bias=True)
Dropout(p=0.0, inplace=False)
Dropout(p=0.0, inplace=False)
Linear(in_features=512, out_features=512, bias=True)
Conv2d(512, 512, kernel_size=(4, 4), stride=(4, 4), groups=512)
SparseWindowAttention(
  (key): Linear(in_features=512, out_features=512, bias=True)
  (query): Linear(in_features=512, out_features=512, bias=True)
  (value): Linear(in_features=512, out_features=512, bias=True)
  (attn_drop): Dropout(p=0.0, inplace=False)
  (proj_drop): Dropout(p=0.0, inplace=False)
  (proj): Linear(in_features=512, out_features=512, bias=True)
  (pool_layer): Conv2d(512, 512, kernel_size=(4, 4), stride=(4, 4), groups=512)
)
LayerNorm((512,), eps=1e-05, elementwise_affine=True)
LayerNorm((512,), eps=1e-05, elementwise_affine=True)
Linear(in_features=512, out_features=1000, bias=True)
Sequential(
  (0): Linear(in_features=512, out_features=1000, bias=True)
)
GELU(approximate='none')
Linear(in_features=1000, out_features=512, bias=True)
Sequential(
  (0): GELU(approximate='none')
  (1): Linear(in_features=1000, out_features=512, bias=True)
)
FusionFeedForward(
  (fc1): Sequential(
    (0): Linear(in_features=512, out_features=1000, bias=True)
  )
  (fc2): Sequential(
    (0): GELU(approximate='none')
    (1): Linear(in_features=1000, out_features=512, bias=True)
  )
)
TemporalSparseTransformer(
  (attention): SparseWindowAttention(
    (key): Linear(in_features=512, out_features=512, bias=True)
    (query): Linear(in_features=512, out_features=512, bias=True)
    (value): Linear(in_features=512, out_features=512, bias=True)
    (attn_drop): Dropout(p=0.0, inplace=False)
    (proj_drop): Dropout(p=0.0, inplace=False)
    (proj): Linear(in_features=512, out_features=512, bias=True)
    (pool_layer): Conv2d(512, 512, kernel_size=(4, 4), stride=(4, 4), groups=512)
  )
  (norm1): LayerNorm((512,), eps=1e-05, elementwise_affine=True)
  (norm2): LayerNorm((512,), eps=1e-05, elementwise_affine=True)
  (mlp): FusionFeedForward(
    (fc1): Sequential(
      (0): Linear(in_features=512, out_features=1000, bias=True)
    )
    (fc2): Sequential(
      (0): GELU(approximate='none')
      (1): Linear(in_features=1000, out_features=512, bias=True)
    )
  )
)
Sequential(
  (0): TemporalSparseTransformer(
    (attention): SparseWindowAttention(
      (key): Linear(in_features=512, out_features=512, bias=True)
      (query): Linear(in_features=512, out_features=512, bias=True)
      (value): Linear(in_features=512, out_features=512, bias=True)
      (attn_drop): Dropout(p=0.0, inplace=False)
      (proj_drop): Dropout(p=0.0, inplace=False)
      (proj): Linear(in_features=512, out_features=512, bias=True)
      (pool_layer): Conv2d(512, 512, kernel_size=(4, 4), stride=(4, 4), groups=512)
    )
    (norm1): LayerNorm((512,), eps=1e-05, elementwise_affine=True)
    (norm2): LayerNorm((512,), eps=1e-05, elementwise_affine=True)
    (mlp): FusionFeedForward(
      (fc1): Sequential(
        (0): Linear(in_features=512, out_features=1000, bias=True)
      )
      (fc2): Sequential(
        (0): GELU(approximate='none')
        (1): Linear(in_features=1000, out_features=512, bias=True)
      )
    )
  )
  (1): TemporalSparseTransformer(
    (attention): SparseWindowAttention(
      (key): Linear(in_features=512, out_features=512, bias=True)
      (query): Linear(in_features=512, out_features=512, bias=True)
      (value): Linear(in_features=512, out_features=512, bias=True)
      (attn_drop): Dropout(p=0.0, inplace=False)
      (proj_drop): Dropout(p=0.0, inplace=False)
      (proj): Linear(in_features=512, out_features=512, bias=True)
      (pool_layer): Conv2d(512, 512, kernel_size=(4, 4), stride=(4, 4), groups=512)
    )
    (norm1): LayerNorm((512,), eps=1e-05, elementwise_affine=True)
    (norm2): LayerNorm((512,), eps=1e-05, elementwise_affine=True)
    (mlp): FusionFeedForward(
      (fc1): Sequential(
        (0): Linear(in_features=512, out_features=1000, bias=True)
      )
      (fc2): Sequential(
        (0): GELU(approximate='none')
        (1): Linear(in_features=1000, out_features=512, bias=True)
      )
    )
  )
  (2): TemporalSparseTransformer(
    (attention): SparseWindowAttention(
      (key): Linear(in_features=512, out_features=512, bias=True)
      (query): Linear(in_features=512, out_features=512, bias=True)
      (value): Linear(in_features=512, out_features=512, bias=True)
      (attn_drop): Dropout(p=0.0, inplace=False)
      (proj_drop): Dropout(p=0.0, inplace=False)
      (proj): Linear(in_features=512, out_features=512, bias=True)
      (pool_layer): Conv2d(512, 512, kernel_size=(4, 4), stride=(4, 4), groups=512)
    )
    (norm1): LayerNorm((512,), eps=1e-05, elementwise_affine=True)
    (norm2): LayerNorm((512,), eps=1e-05, elementwise_affine=True)
    (mlp): FusionFeedForward(
      (fc1): Sequential(
        (0): Linear(in_features=512, out_features=1000, bias=True)
      )
      (fc2): Sequential(
        (0): GELU(approximate='none')
        (1): Linear(in_features=1000, out_features=512, bias=True)
      )
    )
  )
  (3): TemporalSparseTransformer(
    (attention): SparseWindowAttention(
      (key): Linear(in_features=512, out_features=512, bias=True)
      (query): Linear(in_features=512, out_features=512, bias=True)
      (value): Linear(in_features=512, out_features=512, bias=True)
      (attn_drop): Dropout(p=0.0, inplace=False)
      (proj_drop): Dropout(p=0.0, inplace=False)
      (proj): Linear(in_features=512, out_features=512, bias=True)
      (pool_layer): Conv2d(512, 512, kernel_size=(4, 4), stride=(4, 4), groups=512)
    )
    (norm1): LayerNorm((512,), eps=1e-05, elementwise_affine=True)
    (norm2): LayerNorm((512,), eps=1e-05, elementwise_affine=True)
    (mlp): FusionFeedForward(
      (fc1): Sequential(
        (0): Linear(in_features=512, out_features=1000, bias=True)
      )
      (fc2): Sequential(
        (0): GELU(approximate='none')
        (1): Linear(in_features=1000, out_features=512, bias=True)
      )
    )
  )
)
TemporalSparseTransformerBlock(
  (transformer): Sequential(
    (0): TemporalSparseTransformer(
      (attention): SparseWindowAttention(
        (key): Linear(in_features=512, out_features=512, bias=True)
        (query): Linear(in_features=512, out_features=512, bias=True)
        (value): Linear(in_features=512, out_features=512, bias=True)
        (attn_drop): Dropout(p=0.0, inplace=False)
        (proj_drop): Dropout(p=0.0, inplace=False)
        (proj): Linear(in_features=512, out_features=512, bias=True)
        (pool_layer): Conv2d(512, 512, kernel_size=(4, 4), stride=(4, 4), groups=512)
      )
      (norm1): LayerNorm((512,), eps=1e-05, elementwise_affine=True)
      (norm2): LayerNorm((512,), eps=1e-05, elementwise_affine=True)
      (mlp): FusionFeedForward(
        (fc1): Sequential(
          (0): Linear(in_features=512, out_features=1000, bias=True)
        )
        (fc2): Sequential(
          (0): GELU(approximate='none')
          (1): Linear(in_features=1000, out_features=512, bias=True)
        )
      )
    )
    (1): TemporalSparseTransformer(
      (attention): SparseWindowAttention(
        (key): Linear(in_features=512, out_features=512, bias=True)
        (query): Linear(in_features=512, out_features=512, bias=True)
        (value): Linear(in_features=512, out_features=512, bias=True)
        (attn_drop): Dropout(p=0.0, inplace=False)
        (proj_drop): Dropout(p=0.0, inplace=False)
        (proj): Linear(in_features=512, out_features=512, bias=True)
        (pool_layer): Conv2d(512, 512, kernel_size=(4, 4), stride=(4, 4), groups=512)
      )
      (norm1): LayerNorm((512,), eps=1e-05, elementwise_affine=True)
      (norm2): LayerNorm((512,), eps=1e-05, elementwise_affine=True)
      (mlp): FusionFeedForward(
        (fc1): Sequential(
          (0): Linear(in_features=512, out_features=1000, bias=True)
        )
        (fc2): Sequential(
          (0): GELU(approximate='none')
          (1): Linear(in_features=1000, out_features=512, bias=True)
        )
      )
    )
    (2): TemporalSparseTransformer(
      (attention): SparseWindowAttention(
        (key): Linear(in_features=512, out_features=512, bias=True)
        (query): Linear(in_features=512, out_features=512, bias=True)
        (value): Linear(in_features=512, out_features=512, bias=True)
        (attn_drop): Dropout(p=0.0, inplace=False)
        (proj_drop): Dropout(p=0.0, inplace=False)
        (proj): Linear(in_features=512, out_features=512, bias=True)
        (pool_layer): Conv2d(512, 512, kernel_size=(4, 4), stride=(4, 4), groups=512)
      )
      (norm1): LayerNorm((512,), eps=1e-05, elementwise_affine=True)
      (norm2): LayerNorm((512,), eps=1e-05, elementwise_affine=True)
      (mlp): FusionFeedForward(
        (fc1): Sequential(
          (0): Linear(in_features=512, out_features=1000, bias=True)
        )
        (fc2): Sequential(
          (0): GELU(approximate='none')
          (1): Linear(in_features=1000, out_features=512, bias=True)
        )
      )
    )
    (3): TemporalSparseTransformer(
      (attention): SparseWindowAttention(
        (key): Linear(in_features=512, out_features=512, bias=True)
        (query): Linear(in_features=512, out_features=512, bias=True)
        (value): Linear(in_features=512, out_features=512, bias=True)
        (attn_drop): Dropout(p=0.0, inplace=False)
        (proj_drop): Dropout(p=0.0, inplace=False)
        (proj): Linear(in_features=512, out_features=512, bias=True)
        (pool_layer): Conv2d(512, 512, kernel_size=(4, 4), stride=(4, 4), groups=512)
      )
      (norm1): LayerNorm((512,), eps=1e-05, elementwise_affine=True)
      (norm2): LayerNorm((512,), eps=1e-05, elementwise_affine=True)
      (mlp): FusionFeedForward(
        (fc1): Sequential(
          (0): Linear(in_features=512, out_features=1000, bias=True)
        )
        (fc2): Sequential(
          (0): GELU(approximate='none')
          (1): Linear(in_features=1000, out_features=512, bias=True)
        )
      )
    )
  )
)
Traceback (most recent call last):
  File "/Users/z/git/flux/BSSTNet/./deblur.py", line 71, in <module>
    deblurrer = ImageDeblurrer(config_path, model_path)
                ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/Users/z/git/flux/BSSTNet/./deblur.py", line 31, in __init__
    self.model = build_model(opt).to(self.device)
                 ^^^^^^^^^^^^^^^^
  File "/Users/z/git/flux/BSSTNet/basicsr/models/__init__.py", line 26, in build_model
    model = MODEL_REGISTRY.get(opt['model_type'])(opt)
            ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/Users/z/git/flux/BSSTNet/basicsr/models/Video_BSST_model.py", line 45, in __init__
    self.net_g = self.model10_to_device(self.net_g)
                 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/Users/z/git/flux/BSSTNet/basicsr/models/Video_BSST_model.py", line 118, in model10_to_device
    net = net.to(self.device)
          ^^^^^^^^^^^^^^^^^^^
  File "/Users/z/.local/share/mise/installs/python/3.12.7/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1340, in to
    return self._apply(convert)
           ^^^^^^^^^^^^^^^^^^^^
  File "/Users/z/.local/share/mise/installs/python/3.12.7/lib/python3.12/site-packages/torch/nn/modules/module.py", line 900, in _apply
    module._apply(fn)
  File "/Users/z/.local/share/mise/installs/python/3.12.7/lib/python3.12/site-packages/torch/nn/modules/module.py", line 900, in _apply
    module._apply(fn)
  File "/Users/z/.local/share/mise/installs/python/3.12.7/lib/python3.12/site-packages/torch/nn/modules/module.py", line 927, in _apply
    param_applied = fn(param)
                    ^^^^^^^^^
  File "/Users/z/.local/share/mise/installs/python/3.12.7/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1326, in convert
    return t.to(
           ^^^^^
  File "/Users/z/.local/share/mise/installs/python/3.12.7/lib/python3.12/site-packages/torch/cuda/__init__.py", line 310, in _lazy_init
    raise AssertionError("Torch not compiled with CUDA enabled")
AssertionError: Torch not compiled with CUDA enabled

#!/usr/bin/env python

import yaml
import os
import torch
from PIL import Image
from basicsr.models import build_model


class ImageDeblurrer:
  """
  图像去模糊类:加载模型和配置文件,支持批量处理图像。
  """

  def __init__(self, config_path: str, model_path: str):
    """
    初始化图像去模糊器,加载配置和模型权重。

    Args:
        config_path (str): 模型配置文件路径。
        model_path (str): 模型权重文件路径。
    """
    # self.device = torch.device("mps" if torch.backends.mps.is_available() else "cpu")
    self.device = torch.device("cpu")
    print(self.device)
    with open(config_path, "r") as f:
      opt = yaml.safe_load(f)

    # opt["path"]["pretrain_network_g"] = model_path
    opt["is_train"] = False
    self.model = build_model(opt).to(self.device)
    self.model.compile()

  def deblur_image(self, image_path: str) -> Image.Image:
    """
    对输入图像进行去模糊处理。

    Args:
        image_path (str): 输入图像路径。

    Returns:
        Image.Image: 去模糊后的图像。
    """
    if not os.path.exists(image_path):
      raise FileNotFoundError(f"Image file not found: {image_path}")

    # 加载图像
    input_image = Image.open(image_path).convert("RGB")
    input_tensor = (
      torch.tensor(input_image, dtype=torch.float32)
      .permute(2, 0, 1)
      .unsqueeze(0)
      .to(self.device)
    )

    # 推理
    with torch.no_grad():
      output_tensor = self.model.test(input_tensor)

    # 转换结果
    output_tensor = output_tensor.squeeze(0).permute(1, 2, 0).cpu().clamp(0, 255).byte()
    output_image = Image.fromarray(output_tensor.numpy())

    return output_image


if __name__ == "__main__":
  config_path = "options/test/BSST/dvd_BSST.yml"  # 替换为需要的配置文件路径
  model_path = "model_zoos/BSST_dvd.pth"  # 替换为对应的模型权重路径

  deblurrer = ImageDeblurrer(config_path, model_path)

  test_images = ["../test/1.jpg"]
  output_images = ["../test/BSSTNet-1.jpg"]

  for input_path, output_path in zip(test_images, output_images):
    try:
      deblurred_image = deblurrer.deblur_image(input_path)
      deblurred_image.save(output_path)
      print(f"去模糊图像已保存至: {output_path}")
    except Exception as e:
      print(f"处理图像 {input_path} 时发生错误: {e}")

Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment
Labels
None yet
Projects
None yet
Development

No branches or pull requests

1 participant