Skip to content

Commit

Permalink
Merge pull request #945 from zariiii9003/ruff_rules
Browse files Browse the repository at this point in the history
Activate ruff-specific rules (RUF)
  • Loading branch information
danielhrisca authored Dec 13, 2023
2 parents 312bd1b + 7589e2e commit 48a4dde
Show file tree
Hide file tree
Showing 30 changed files with 315 additions and 343 deletions.
4 changes: 2 additions & 2 deletions .pre-commit-config.yaml
Original file line number Diff line number Diff line change
@@ -1,10 +1,10 @@
repos:
- repo: https://github.com/ambv/black
rev: 23.11.0
rev: 23.12.0
hooks:
- id: black
- repo: https://github.com/charliermarsh/ruff-pre-commit
# Ruff version.
rev: v0.1.6
rev: v0.1.7
hooks:
- id: ruff
6 changes: 5 additions & 1 deletion pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -24,14 +24,18 @@ skip = "pp* *_ppc64le *-musllinux* *_s390x" # skip pypy and irrelevant archite
[tool.ruff]
select = [
"B", # flake8-bugbear
"C4", # flake8-comprehensions
"F", # pyflakes
"UP", # pyupgrade
"I", # isort
"PIE", # flake8-pie
"RUF", # Ruff-specific rules
]
ignore = [
"B007", # unused-loop-control-variable
"F401", # unused-import
"F841", # unused-variable
"RUF012", # mutable-class-default
"RUF015", # unnecessary-iterable-allocation-for-first-element
]
exclude = ["./src/asammdf/gui/ui"]
target-version = "py38"
Expand Down
2 changes: 1 addition & 1 deletion setup.py
Original file line number Diff line number Diff line change
Expand Up @@ -129,7 +129,7 @@ def _get_ext_modules():
include_package_data=True,
# Although 'package_data' is the preferred approach, in some case you may
# need to place data files outside of your packages. See:
# http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files # noqa
# http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
# data_files=[('my_data', ['data/data_file'])],
# To provide executable scripts, use entry points in preference to the
Expand Down
2 changes: 1 addition & 1 deletion src/asammdf/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,7 @@
from .version import __version__

try:
from .blocks import cutils
from .blocks import cutils # noqa: F401

__cextension__ = True
except ImportError:
Expand Down
4 changes: 2 additions & 2 deletions src/asammdf/blocks/mdf_v3.py
Original file line number Diff line number Diff line change
Expand Up @@ -3009,7 +3009,7 @@ def get(
size = byte_size

vals_dtype = vals.dtype.kind
if vals_dtype not in "ui" and (bit_offset or not bits == size * 8):
if vals_dtype not in "ui" and (bit_offset or bits != size * 8):
vals = self._get_not_byte_aligned_data(data_bytes, grp, ch_nr)
else:
dtype_ = vals.dtype
Expand Down Expand Up @@ -3285,7 +3285,7 @@ def get_master(
if time_b:
t += time_b

if not t.dtype == float64:
if t.dtype != float64:
t = t.astype(float64)

self._master_channel_metadata[index] = metadata
Expand Down
9 changes: 4 additions & 5 deletions src/asammdf/blocks/mdf_v4.py
Original file line number Diff line number Diff line change
Expand Up @@ -155,7 +155,7 @@
COMMON_SHORT_u = v4c.COMMON_SHORT_u
VALID_DATA_TYPES = v4c.VALID_DATA_TYPES

EMPTY_TUPLE = tuple()
EMPTY_TUPLE = ()

# 100 extra steps for the sorting, 1 step after sorting and 1 step at finish
SORT_STEPS = 102
Expand Down Expand Up @@ -4443,7 +4443,7 @@ def _append_dataframe(
acq_name: str | None = None,
acq_source: Source | None = None,
comment: str | None = None,
units: dict[str, str | bytes] = None,
units: dict[str, str | bytes] | None = None,
) -> None:
"""
Appends a new data group from a Pandas data frame.
Expand Down Expand Up @@ -6927,7 +6927,7 @@ def _get_array(
shape = (shape[0],) + shape[1:][::-1]
vals = vals.reshape(shape)

axes = (0,) + tuple(range(len(shape) - 1, 0, -1))
axes = (0, *reversed(range(1, len(shape))))
vals = transpose(vals, axes=axes)

cycles_nr = len(vals)
Expand Down Expand Up @@ -8028,7 +8028,6 @@ def _yield_selected_signals(

signals.append(signal)

pass
else:
for channel_index in channels:
signal, invalidation_bits = self.get(
Expand Down Expand Up @@ -8288,7 +8287,7 @@ def get_master(

self._master_channel_metadata[index] = metadata

if not t.dtype == float64:
if t.dtype != float64:
t = t.astype(float64)

if raster and t.size:
Expand Down
3 changes: 1 addition & 2 deletions src/asammdf/blocks/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,6 @@

from collections.abc import Iterator
from copy import deepcopy
from enum import IntFlag
from functools import lru_cache
from io import StringIO
import json
Expand Down Expand Up @@ -120,7 +119,7 @@ def detect(text: bytes) -> DetectDict:
BLK_COMMON_uf = Struct("<4s4xQ").unpack_from
BLK_COMMON_u = Struct("<4s4xQ8x").unpack

EMPTY_TUPLE = tuple()
EMPTY_TUPLE = ()

_xmlns_pattern = re.compile(' xmlns="[^"]*"')

Expand Down
10 changes: 5 additions & 5 deletions src/asammdf/blocks/v2_v3_blocks.py
Original file line number Diff line number Diff line change
Expand Up @@ -985,7 +985,7 @@ def __init__(self, **kwargs) -> None:

if conv_type == v23c.CONVERSION_TYPE_LINEAR:
(self.b, self.a) = unpack_from("<2d", block, v23c.CC_COMMON_SHORT_SIZE)
if not size == v23c.CC_LIN_BLOCK_SIZE:
if size != v23c.CC_LIN_BLOCK_SIZE:
self.CANapeHiddenExtra = block[v23c.CC_LIN_BLOCK_SIZE - 4 :]
size = len(self.CANapeHiddenExtra)
nr = size // 40
Expand Down Expand Up @@ -1151,7 +1151,7 @@ def __init__(self, **kwargs) -> None:
self.ref_param_nr = 2
self.b = kwargs.get("b", 0)
self.a = kwargs.get("a", 1)
if not self.block_len == v23c.CC_LIN_BLOCK_SIZE:
if self.block_len != v23c.CC_LIN_BLOCK_SIZE:
self.CANapeHiddenExtra = kwargs["CANapeHiddenExtra"]

elif kwargs["conversion_type"] in (
Expand Down Expand Up @@ -1323,7 +1323,7 @@ def metadata(self, indent: str = "") -> str:
keys = v23c.KEYS_CONVERSION_FORMULA
elif conv == v23c.CONVERSION_TYPE_LINEAR:
keys = v23c.KEYS_CONVERSION_LINEAR
if not self.block_len == v23c.CC_LIN_BLOCK_SIZE:
if self.block_len != v23c.CC_LIN_BLOCK_SIZE:
keys += ("CANapeHiddenExtra",)

nr = self.ref_param_nr
Expand Down Expand Up @@ -1640,7 +1640,7 @@ def __bytes__(self) -> bytes:
fmt = v23c.FMT_CONVERSION_FORMULA.format(self.block_len - v23c.CC_COMMON_BLOCK_SIZE)
elif conv == v23c.CONVERSION_TYPE_LINEAR:
fmt = v23c.FMT_CONVERSION_LINEAR
if not self.block_len == v23c.CC_LIN_BLOCK_SIZE:
if self.block_len != v23c.CC_LIN_BLOCK_SIZE:
fmt += f"{self.block_len - v23c.CC_LIN_BLOCK_SIZE}s"
elif conv in (v23c.CONVERSION_TYPE_POLY, v23c.CONVERSION_TYPE_RAT):
fmt = v23c.FMT_CONVERSION_POLY_RAT
Expand All @@ -1662,7 +1662,7 @@ def __bytes__(self) -> bytes:
keys = v23c.KEYS_CONVERSION_FORMULA
elif conv == v23c.CONVERSION_TYPE_LINEAR:
keys = v23c.KEYS_CONVERSION_LINEAR
if not self.block_len == v23c.CC_LIN_BLOCK_SIZE:
if self.block_len != v23c.CC_LIN_BLOCK_SIZE:
keys += ("CANapeHiddenExtra",)
elif conv in (v23c.CONVERSION_TYPE_POLY, v23c.CONVERSION_TYPE_RAT):
keys = v23c.KEYS_CONVERSION_POLY_RAT
Expand Down
2 changes: 1 addition & 1 deletion src/asammdf/blocks/v2_v3_constants.py
Original file line number Diff line number Diff line change
Expand Up @@ -296,7 +296,7 @@
"max_raw_value",
"sampling_rate",
)
KEYS_CHANNEL_LONGNAME = KEYS_CHANNEL_SHORT + ("long_name_addr",)
KEYS_CHANNEL_LONGNAME = (*KEYS_CHANNEL_SHORT, "long_name_addr")

FMT_CHANNEL_GROUP = "<2sH3I3HI"
KEYS_CHANNEL_GROUP = (
Expand Down
4 changes: 2 additions & 2 deletions src/asammdf/blocks/v4_constants.py
Original file line number Diff line number Diff line change
Expand Up @@ -657,7 +657,7 @@

FMT_CONVERSION_LINEAR = FMT_CONVERSION_NONE + "2d"
CONVERSION_LINEAR_PACK = struct.Struct(FMT_CONVERSION_LINEAR).pack
KEYS_CONVERSION_LINEAR = KEYS_CONVERSION_NONE + ("b", "a")
KEYS_CONVERSION_LINEAR = (*KEYS_CONVERSION_NONE, "b", "a")
FMT_CONVERSION_LINEAR_INIT = "<4Q2B3H4d"
CONVERSION_LINEAR_INIT_u = struct.Struct(FMT_CONVERSION_LINEAR_INIT).unpack
CONVERSION_LINEAR_INIT_uf = struct.Struct(FMT_CONVERSION_LINEAR_INIT).unpack_from
Expand Down Expand Up @@ -686,7 +686,7 @@

FMT_CONVERSION_RAT = FMT_CONVERSION_NONE + "6d"
CONVERSION_RAT_PACK = struct.Struct(FMT_CONVERSION_RAT).pack
KEYS_CONVERSION_RAT = KEYS_CONVERSION_NONE + ("P1", "P2", "P3", "P4", "P5", "P6")
KEYS_CONVERSION_RAT = (*KEYS_CONVERSION_NONE, "P1", "P2", "P3", "P4", "P5", "P6")

FMT_CONVERSION_RAT_INIT = "<4Q2B3H8d"

Expand Down
4 changes: 2 additions & 2 deletions src/asammdf/gui/dialogs/bus_database_manager.py
Original file line number Diff line number Diff line change
Expand Up @@ -25,13 +25,13 @@ def __init__(
buses = can_databases[::2]
dbs = can_databases[1::2]

databases["CAN"] = [(bus, database) for bus, database in zip(buses, dbs)]
databases["CAN"] = list(zip(buses, dbs))

lin_databases = self._settings.value("lin_databases", [])
buses = lin_databases[::2]
dbs = lin_databases[1::2]

databases["LIN"] = [(bus, database) for bus, database in zip(buses, dbs)]
databases["LIN"] = list(zip(buses, dbs))

self.widget = BusDatabaseManager(databases)

Expand Down
4 changes: 2 additions & 2 deletions src/asammdf/gui/dialogs/multi_search.py
Original file line number Diff line number Diff line change
Expand Up @@ -82,9 +82,9 @@ def search_text_changed(self):

def _add(self, event):
count = self.selection.count()
names = set(self.selection.item(i).text() for i in range(count))
names = {self.selection.item(i).text() for i in range(count)}

to_add = set(item.text() for item in self.matches.selectedItems())
to_add = {item.text() for item in self.matches.selectedItems()}

names = names | to_add

Expand Down
8 changes: 4 additions & 4 deletions src/asammdf/gui/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@
import sys
from textwrap import indent
from threading import Thread
from time import perf_counter, sleep
from time import sleep
import traceback
from traceback import format_exc
from typing import Dict, Union
Expand Down Expand Up @@ -444,7 +444,7 @@ def compute_signal(
if found_numeric:
break

names = found_args + ["t"]
names = [*found_args, "t"]

triggering = description.get("triggering", "triggering_on_all")
if triggering == "triggering_on_all":
Expand Down Expand Up @@ -504,7 +504,7 @@ def compute_signal(
samples = []
for values in zip(*signals):
try:
current_sample = func(**{arg_name: arg_val for arg_name, arg_val in zip(names, values)})
current_sample = func(**dict(zip(names, values)))
except:
current_sample = COMPUTED_FUNCTION_ERROR_VALUE
samples.append(current_sample)
Expand Down Expand Up @@ -536,7 +536,7 @@ def compute_signal(
names.extend(not_found)
signals.extend(not_found_signals)

samples = func(**{arg_name: arg_signal for arg_name, arg_signal in zip(names, signals)})
samples = func(**dict(zip(names, signals)))
if len(samples) != len(common_timebase):
common_timebase = common_timebase[-len(samples) :]

Expand Down
54 changes: 25 additions & 29 deletions src/asammdf/gui/widgets/batch.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,8 +19,6 @@
BUS_TYPE_FLEXRAY,
BUS_TYPE_LIN,
BUS_TYPE_USB,
FLAG_AT_TO_STRING,
FLAG_CG_BUS_EVENT,
)
from ...mdf import MDF, SUPPORTED_VERSIONS
from ..dialogs.advanced_search import AdvancedSearch
Expand Down Expand Up @@ -119,7 +117,7 @@ def __init__(
self.empty_channels_mat.insertItems(0, ("skip", "zeros"))
self.empty_channels_csv.insertItems(0, ("skip", "zeros"))
try:
import scipy
import scipy # noqa: F401

self.mat_format.insertItems(0, ("4", "5", "7.3"))
except:
Expand All @@ -144,13 +142,13 @@ def __init__(
buses = can_databases[::2]
dbs = can_databases[1::2]

databases["CAN"] = [(bus, database) for bus, database in zip(buses, dbs)]
databases["CAN"] = list(zip(buses, dbs))

lin_databases = self._settings.value("lin_databases", [])
buses = lin_databases[::2]
dbs = lin_databases[1::2]

databases["LIN"] = [(bus, database) for bus, database in zip(buses, dbs)]
databases["LIN"] = list(zip(buses, dbs))

for bus, database in databases["CAN"]:
item = QtWidgets.QListWidgetItem()
Expand Down Expand Up @@ -557,24 +555,22 @@ def extract_bus_csv_logging_thread(
)

result = mdf_.export(
**{
"fmt": "csv",
"filename": file_name,
"single_time_base": single_time_base,
"time_from_zero": time_from_zero,
"empty_channels": empty_channels,
"raster": raster or None,
"time_as_date": time_as_date,
"ignore_value2text_conversions": self.ignore_value2text_conversions,
"delimiter": delimiter,
"doublequote": doublequote,
"escapechar": escapechar,
"lineterminator": lineterminator,
"quotechar": quotechar,
"quoting": quoting,
"add_units": add_units,
"progress": progress,
}
fmt="csv",
filename=file_name,
single_time_base=single_time_base,
time_from_zero=time_from_zero,
empty_channels=empty_channels,
raster=raster or None,
time_as_date=time_as_date,
ignore_value2text_conversions=self.ignore_value2text_conversions,
delimiter=delimiter,
doublequote=doublequote,
escapechar=escapechar,
lineterminator=lineterminator,
quotechar=quotechar,
quoting=quoting,
add_units=add_units,
progress=progress,
)
if result is TERMINATED:
return
Expand Down Expand Up @@ -1309,7 +1305,7 @@ def apply_processing(self, event):

if output_format == "HDF5":
try:
from h5py import File as HDF5
from h5py import File as HDF5 # noqa: F401
except ImportError:
MessageBox.critical(
self,
Expand All @@ -1331,7 +1327,7 @@ def apply_processing(self, event):
return
else:
try:
from scipy.io import savemat
from scipy.io import savemat # noqa: F401
except ImportError:
MessageBox.critical(
self,
Expand All @@ -1342,7 +1338,7 @@ def apply_processing(self, event):

elif output_format == "Parquet":
try:
from fastparquet import write as write_parquet
from fastparquet import write as write_parquet # noqa: F401
except ImportError:
MessageBox.critical(
self,
Expand Down Expand Up @@ -1376,18 +1372,18 @@ def apply_processing_thread(self, progress):

if output_format == "HDF5":
suffix = ".hdf"
from h5py import File as HDF5
from h5py import File as HDF5 # noqa: F401

elif output_format == "MAT":
suffix = ".mat"
if opts.mat_format == "7.3":
from hdf5storage import savemat
else:
from scipy.io import savemat
from scipy.io import savemat # noqa: F401

elif output_format == "Parquet":
suffix = ".parquet"
from fastparquet import write as write_parquet
from fastparquet import write as write_parquet # noqa: F401

elif output_format == "CSV":
suffix = ".csv"
Expand Down
Loading

0 comments on commit 48a4dde

Please sign in to comment.