
Commit

polish: update pre-commit hooks; add: csv and markdown logging
ryanxingql committed Oct 8, 2024
1 parent 26d1705 commit 28e02e4
Showing 29 changed files with 556 additions and 746 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/sync_to_private.yml
@@ -25,4 +25,4 @@ jobs:
- name: Sync to private repository
run: |
git remote add private git@github.com:ryanxingql/${{ secrets.PRIVATE_REPO_NAME }}.git
git push private basicsr-based-dev --force
git push private basicsr-based-dev --force
2 changes: 1 addition & 1 deletion .gitignore
@@ -168,4 +168,4 @@ cython_debug/
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
.idea/
.idea/
34 changes: 31 additions & 3 deletions .pre-commit-config.yaml
@@ -1,9 +1,37 @@
repos:
- repo: https://github.com/psf/black
rev: 24.8.0
# flake8
- repo: https://github.com/PyCQA/flake8
rev: 7.1.1
hooks:
- id: black
- id: flake8

# yapf
- repo: https://github.com/google/yapf
rev: v0.40.2
hooks:
- id: yapf

# isort
- repo: https://github.com/timothycrosley/isort
rev: 5.13.2
hooks:
- id: isort

# codespell
- repo: https://github.com/codespell-project/codespell
rev: v2.3.0
hooks:
- id: codespell

# pre-commit-hooks
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v5.0.0
hooks:
- id: trailing-whitespace # Trim trailing whitespace
- id: check-yaml # Attempt to load all yaml files to verify syntax
- id: check-merge-conflict # Check for files that contain merge conflict strings
- id: double-quote-string-fixer # Replace double-quoted strings with single quoted strings
- id: end-of-file-fixer # Make sure files end in a newline and only a newline
- id: requirements-txt-fixer # Sort entries in requirements.txt and remove incorrect entry for pkg-resources==0.0.0
- id: mixed-line-ending # Replace or check mixed line ending
args: ["--fix=lf"]
2 changes: 1 addition & 1 deletion basicsr
27 changes: 13 additions & 14 deletions powerqe/archs/__init__.py
@@ -1,7 +1,6 @@
from copy import deepcopy

from basicsr.utils import get_root_logger

from .arcnn_arch import ARCNN
from .cbdnet_arch import CBDNet
from .dcad_arch import DCAD
@@ -14,24 +13,24 @@
from .unet_arch import UNet

__all__ = [
"ARCNN",
"CBDNet",
"DCAD",
"DnCNN",
"IdentityNet",
"MPRNet",
"RBQE",
"RDN",
"build_network",
"ARCH_REGISTRY",
"UNet",
'ARCNN',
'CBDNet',
'DCAD',
'DnCNN',
'IdentityNet',
'MPRNet',
'RBQE',
'RDN',
'build_network',
'ARCH_REGISTRY',
'UNet',
]


def build_network(opt):
opt = deepcopy(opt)
network_type = opt.pop("type")
network_type = opt.pop('type')
net = ARCH_REGISTRY.get(network_type)(**opt)
logger = get_root_logger()
logger.info(f"Network [{net.__class__.__name__}] is created.")
logger.info(f'Network [{net.__class__.__name__}] is created.')
return net
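For reference, build_network is driven by an options dict whose 'type' key names a registered architecture; the remaining keys are forwarded to that class's constructor. A minimal sketch, assuming ARCNN accepts an io_channels argument as suggested by the arcnn_arch.py diff below (the option values are illustrative, not taken from a real config):

```python
from powerqe.archs import build_network

# 'type' is popped and used to look up the class in ARCH_REGISTRY;
# everything else is passed to the constructor.
opt = {'type': 'ARCNN', 'io_channels': 3}
net = build_network(opt)  # logs: Network [ARCNN] is created.
```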
4 changes: 1 addition & 3 deletions powerqe/archs/arcnn_arch.py
@@ -37,9 +37,7 @@ def __init__(
super().__init__()

self.layers = nn.Sequential(
nn.Conv2d(
io_channels, mid_channels_1, in_kernel_size, padding=in_kernel_size // 2
),
nn.Conv2d(io_channels, mid_channels_1, in_kernel_size, padding=in_kernel_size // 2),
nn.ReLU(inplace=False),
nn.Conv2d(
mid_channels_1,
48 changes: 21 additions & 27 deletions powerqe/archs/cbdnet_arch.py
@@ -1,8 +1,8 @@
import torch
from torch import nn as nn

from .unet_arch import UNet
from .registry import ARCH_REGISTRY
from .unet_arch import UNet


@ARCH_REGISTRY.register()
@@ -31,41 +31,35 @@ def __init__(
nf_gr_denoise=2,
nl_base_denoise=1,
nl_gr_denoise=2,
down_denoise="avepool2d",
up_denoise="transpose2d",
reduce_denoise="add",
down_denoise='avepool2d',
up_denoise='transpose2d',
reduce_denoise='add',
):
super().__init__()

estimate_list = nn.ModuleList(
[
estimate_list = nn.ModuleList([
nn.Conv2d(
in_channels=io_channels,
out_channels=estimate_channels,
kernel_size=3,
padding=3 // 2,
),
nn.ReLU(inplace=True),
])
for _ in range(3):
estimate_list += nn.ModuleList([
nn.Conv2d(
in_channels=io_channels,
in_channels=estimate_channels,
out_channels=estimate_channels,
kernel_size=3,
padding=3 // 2,
),
nn.ReLU(inplace=True),
]
)
for _ in range(3):
estimate_list += nn.ModuleList(
[
nn.Conv2d(
in_channels=estimate_channels,
out_channels=estimate_channels,
kernel_size=3,
padding=3 // 2,
),
nn.ReLU(inplace=True),
]
)
estimate_list += nn.ModuleList(
[
nn.Conv2d(estimate_channels, io_channels, 3, padding=3 // 2),
nn.ReLU(inplace=True),
]
)
])
estimate_list += nn.ModuleList([
nn.Conv2d(estimate_channels, io_channels, 3, padding=3 // 2),
nn.ReLU(inplace=True),
])
self.estimate = nn.Sequential(*estimate_list)

self.denoise = UNet(
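The estimation branch above is built with a pattern worth noting: a ModuleList is grown with += and then unpacked into nn.Sequential. A standalone sketch of the same pattern (the channel counts here are placeholders, not CBDNet's defaults):

```python
import torch
from torch import nn

# Build the layer list incrementally, as CBDNet's estimation branch does,
# then unpack the ModuleList into a single nn.Sequential.
layers = nn.ModuleList([nn.Conv2d(3, 32, 3, padding=1), nn.ReLU(inplace=True)])
for _ in range(3):
    layers += nn.ModuleList([nn.Conv2d(32, 32, 3, padding=1), nn.ReLU(inplace=True)])
layers += nn.ModuleList([nn.Conv2d(32, 3, 3, padding=1), nn.ReLU(inplace=True)])
estimate = nn.Sequential(*layers)

print(estimate(torch.rand(1, 3, 16, 16)).shape)  # torch.Size([1, 3, 16, 16])
```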
4 changes: 1 addition & 3 deletions powerqe/archs/dncnn_arch.py
@@ -32,9 +32,7 @@ def __init__(self, io_channels=3, mid_channels=64, num_blocks=15, if_bn=False):
layers += [
# bias is unnecessary and off due to the following BN
nn.Conv2d(mid_channels, mid_channels, 3, padding=1, bias=False),
nn.BatchNorm2d(
num_features=mid_channels, momentum=0.9, eps=1e-04, affine=True
),
nn.BatchNorm2d(num_features=mid_channels, momentum=0.9, eps=1e-04, affine=True),
]
else:
layers.append(nn.Conv2d(mid_channels, mid_channels, 3, padding=1))
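The comment in the hunk above ("bias is unnecessary and off due to the following BN") reflects a standard argument: BatchNorm subtracts the per-channel mean, so any constant bias added by the preceding convolution is cancelled and then replaced by BN's own learnable shift. A small self-contained check of the same Conv2d/BatchNorm2d pairing used in the diff (the surrounding layers are omitted):

```python
import torch
from torch import nn

block = nn.Sequential(
    nn.Conv2d(64, 64, 3, padding=1, bias=False),  # bias dropped: BN re-centres anyway
    nn.BatchNorm2d(num_features=64, momentum=0.9, eps=1e-04, affine=True),
)
x = torch.rand(1, 64, 16, 16)
print(block(x).shape)  # torch.Size([1, 64, 16, 16])
```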
2 changes: 1 addition & 1 deletion powerqe/archs/identitynet_arch.py
@@ -8,7 +8,7 @@
class IdentityNet(nn.Module):
"""Identity network used for testing benchmarks (in tensors). Support up-scaling."""

def __init__(self, scale=1, upscale_mode="nearest"):
def __init__(self, scale=1, upscale_mode='nearest'):
super().__init__()
self.scale = scale
self.upscale_mode = upscale_mode
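IdentityNet's forward pass is not included in this excerpt. A minimal re-implementation of what an identity network with optional up-scaling could look like, written purely for illustration (this is an assumption, not PowerQE's actual code):

```python
import torch
from torch import nn
from torch.nn import functional as F


class IdentityLikeNet(nn.Module):
    """Return the input unchanged, optionally up-scaled (illustrative only)."""

    def __init__(self, scale=1, upscale_mode='nearest'):
        super().__init__()
        self.scale = scale
        self.upscale_mode = upscale_mode

    def forward(self, x):
        if self.scale == 1:
            return x
        return F.interpolate(x, scale_factor=self.scale, mode=self.upscale_mode)


x = torch.rand(1, 3, 8, 8)
print(IdentityLikeNet(scale=2)(x).shape)  # torch.Size([1, 3, 16, 16])
```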
(The remaining changed files are not rendered in this excerpt.)
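The commit title also mentions CSV and Markdown logging; the corresponding files are among those not shown above. As a purely hypothetical sketch of what per-image metric logging to CSV and Markdown could look like (none of the file names, keys, or values below come from the repository):

```python
import csv

# Hypothetical evaluation records.
records = [
    {'image': '0001.png', 'psnr': 32.41, 'ssim': 0.912},
    {'image': '0002.png', 'psnr': 31.87, 'ssim': 0.905},
]

# CSV: one row per image.
with open('results.csv', 'w', newline='') as f:
    writer = csv.DictWriter(f, fieldnames=['image', 'psnr', 'ssim'])
    writer.writeheader()
    writer.writerows(records)

# Markdown: the same data as a table.
with open('results.md', 'w') as f:
    f.write('| image | PSNR | SSIM |\n| --- | --- | --- |\n')
    for r in records:
        f.write(f"| {r['image']} | {r['psnr']:.2f} | {r['ssim']:.4f} |\n")
```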
