Commit e881fa7

More aggressive ruff refactors (unsafe). Remove that compat commonpath.

ionelmc committed Dec 14, 2023
1 parent fcb183c commit e881fa7
Showing 15 changed files with 86 additions and 136 deletions.
7 changes: 6 additions & 1 deletion pyproject.toml
@@ -13,6 +13,11 @@ ignore = [
"S101", # flake8-bandit assert
"S308", # flake8-bandit suspicious-mark-safe-usage
"E501", # pycodestyle line-too-long
"PT004",
"PT011",
"PT013",
"S603",
"S607",
]
line-length = 140
select = [
@@ -28,7 +33,7 @@ select = [
"PLC", # pylint convention
"PLE", # pylint errors
"PT", # flake8-pytest-style
"PTH", # flake8-use-pathlib
# "PTH", # flake8-use-pathlib
"Q", # flake8-quotes
"RSE", # flake8-raise
"RUF", # ruff-specific rules
4 changes: 2 additions & 2 deletions src/pytest_benchmark/csv.py
@@ -17,7 +17,7 @@ def render(self, output_file, groups):
with output_file.open('w', ensure=True) as stream:
writer = csv.writer(stream)
params = sorted(
set(param for group, benchmarks in groups for benchmark in benchmarks for param in benchmark.get('params', {}) or ())
{param for group, benchmarks in groups for benchmark in benchmarks for param in benchmark.get('params', {}) or ()}
)
writer.writerow(
[
@@ -27,7 +27,7 @@ def render(self, output_file, groups):
+ self.columns
)

for group, benchmarks in groups:
for _, benchmarks in groups:
benchmarks = sorted(benchmarks, key=operator.itemgetter(self.sort))

for bench in benchmarks:
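Both changes in this file are mechanical ruff fixes: the set(generator) call becomes a set comprehension, and the unused group loop variable becomes _. A standalone sketch of the comprehension rewrite, with illustrative names that are not from csv.py:

```python
rows = [('a', [3, 1]), ('b', [2])]

# Before: a generator expression passed to set()
params_old = sorted(set(param for _, params in rows for param in params))

# After: the equivalent set comprehension that ruff produces
params_new = sorted({param for _, params in rows for param in params})

assert params_old == params_new == [1, 2, 3]
```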
24 changes: 10 additions & 14 deletions src/pytest_benchmark/fixture.py
@@ -4,6 +4,7 @@
import sys
import time
import traceback
import typing
from math import ceil

from .timers import compute_timer_precision
@@ -25,7 +26,7 @@ class FixtureAlreadyUsed(Exception):


class BenchmarkFixture:
_precisions = {}
_precisions: typing.ClassVar = {}

def __init__(
self,
@@ -85,9 +86,7 @@ def _get_precision(self, timer):
else:
timer_precision = self._precisions[timer] = compute_timer_precision(timer)
self._logger.debug('')
self._logger.debug(
'Computing precision for %s ... %ss.' % (NameWrapper(timer), format_time(timer_precision)), blue=True, bold=True
)
self._logger.debug(f'Computing precision for {NameWrapper(timer)} ... {format_time(timer_precision)}s.', blue=True, bold=True)
return timer_precision

def _make_runner(self, function_to_benchmark, args, kwargs):
@@ -170,11 +169,11 @@ def _raw(self, function_to_benchmark, *args, **kwargs):

stats = self._make_stats(iterations)

self._logger.debug(' Running %s rounds x %s iterations ...' % (rounds, iterations), yellow=True, bold=True)
self._logger.debug(f' Running {rounds} rounds x {iterations} iterations ...', yellow=True, bold=True)
run_start = time.time()
if self._warmup:
warmup_rounds = min(rounds, max(1, int(self._warmup / iterations)))
self._logger.debug(' Warmup %s rounds x %s iterations ...' % (warmup_rounds, iterations))
self._logger.debug(f' Warmup {warmup_rounds} rounds x {iterations} iterations ...')
for _ in range(warmup_rounds):
runner(loops_range)
for _ in range(rounds):
@@ -253,7 +252,7 @@ def weave(self, target, **kwargs):
try:
import aspectlib
except ImportError as exc:
raise ImportError(exc.args, 'Please install aspectlib or pytest-benchmark[aspect]')
raise ImportError(exc.args, 'Please install aspectlib or pytest-benchmark[aspect]') from exc

def aspect(function):
def wrapper(*args, **kwargs):
@@ -278,9 +277,8 @@ def _calibrate_timer(self, runner):
min_time_estimate = min_time * 5 / self._calibration_precision
self._logger.debug('')
self._logger.debug(
' Calibrating to target round %ss; will estimate when reaching %ss '
'(using: %s, precision: %ss).'
% (format_time(min_time), format_time(min_time_estimate), NameWrapper(self._timer), format_time(timer_precision)),
f' Calibrating to target round {format_time(min_time)}s; will estimate when reaching {format_time(min_time_estimate)}s '
f'(using: {NameWrapper(self._timer)}, precision: {format_time(timer_precision)}s).',
yellow=True,
bold=True,
)
@@ -297,11 +295,9 @@ def _calibrate_timer(self, runner):
duration = min(duration, runner(loops_range))
warmup_rounds += 1
warmup_iterations += loops
self._logger.debug(
' Warmup: %ss (%s x %s iterations).' % (format_time(time.time() - warmup_start), warmup_rounds, loops)
)
self._logger.debug(f' Warmup: {format_time(time.time() - warmup_start)}s ({warmup_rounds} x {loops} iterations).')

self._logger.debug(' Measured %s iterations: %ss.' % (loops, format_time(duration)), yellow=True)
self._logger.debug(f' Measured {loops} iterations: {format_time(duration)}s.', yellow=True)
if duration >= min_time:
break

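Three patterns recur in this file: the mutable class attribute gains a typing.ClassVar annotation, '%s'-style logging strings become f-strings, and the re-raised ImportError is chained with `from exc`. A minimal sketch of the first two, using made-up names:

```python
import typing


class Runner:
    # Mutable class-level default annotated as ClassVar so it cannot be
    # mistaken for a per-instance field (the _precisions change above).
    _cache: typing.ClassVar[dict] = {}

    def describe(self, rounds, iterations):
        # f-string instead of '%s'-style formatting, as in the debug messages above
        return f'Running {rounds} rounds x {iterations} iterations ...'


print(Runner().describe(5, 10))
```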
6 changes: 3 additions & 3 deletions src/pytest_benchmark/histogram.py
@@ -9,7 +9,7 @@
from pygal.graph.box import Box
from pygal.style import DefaultStyle
except ImportError as exc:
raise ImportError(exc.args, 'Please install pygal and pygaljs or pytest-benchmark[histogram]')
raise ImportError(exc.args, 'Please install pygal and pygaljs or pytest-benchmark[histogram]') from exc


class CustomBox(Box):
@@ -37,13 +37,13 @@ def _format(self, x, *args):
return sup(x, *args)

def _tooltip_data(self, node, value, x, y, classes=None, xlabel=None):
super(CustomBox, self)._tooltip_data(node, value[0], x, y, classes=classes, xlabel=None)
super()._tooltip_data(node, value[0], x, y, classes=classes, xlabel=None)
self.svg.node(node, 'desc', class_='x_label').text = value[1]


def make_plot(benchmarks, title, adjustment):
class Style(DefaultStyle):
colors = ['#000000' if row['path'] else DefaultStyle.colors[1] for row in benchmarks]
colors = tuple('#000000' if row['path'] else DefaultStyle.colors[1] for row in benchmarks)
font_family = 'Consolas, "Deja Vu Sans Mono", "Bitstream Vera Sans Mono", "Courier New", monospace'

minimum = int(min(row['min'] * adjustment for row in benchmarks))
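The two fixes here are the Python 3 zero-argument super() call and turning the class-level colors list into a tuple. A reduced example of both, with invented names:

```python
class Base:
    def label(self, text):
        return f'label: {text}'


class Custom(Base):
    # Class-level sequence kept as an immutable tuple rather than a shared mutable list
    colors = ('#000000', '#ff0000')

    def label(self, text):
        # zero-argument super() replaces super(Custom, self)
        return super().label(text) + f' ({self.colors[0]})'


assert Custom().label('min') == 'label: min (#000000)'
```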
13 changes: 6 additions & 7 deletions src/pytest_benchmark/plugin.py
@@ -293,8 +293,7 @@ def pytest_benchmark_compare_machine_info(config, benchmarksession, machine_info

if compared_machine_info != machine_info:
benchmarksession.logger.warning(
'Benchmark machine_info is different. Current: %s VS saved: %s (location: %s).'
% (
'Benchmark machine_info is different. Current: {} VS saved: {} (location: {}).'.format(
machine_info,
compared_machine_info,
benchmarksession.storage.location,
@@ -335,7 +334,7 @@ def pytest_benchmark_group_stats(config, benchmarks, group_by):
key += (bench['param'],)
elif grouping.startswith('param:'):
param_name = grouping[len('param:') :]
key += ('%s=%s' % (param_name, bench['params'][param_name]),)
key += ('{}={}'.format(param_name, bench['params'][param_name]),)
else:
raise NotImplementedError('Unsupported grouping %r.' % group_by)
groups[' '.join(str(p) for p in key if p) or None].append(bench)
@@ -420,7 +419,7 @@ def pytest_benchmark_generate_json(config, benchmarks, include_data, machine_inf
return output_json


@pytest.fixture(scope='function')
@pytest.fixture
def benchmark(request):
bs = request.config._benchmarksession

@@ -440,11 +439,11 @@ def benchmark(request):
disabled=bs.disabled,
**dict(bs.options, **options),
)
request.addfinalizer(fixture._cleanup)
return fixture
yield fixture
fixture._cleanup()


@pytest.fixture(scope='function')
@pytest.fixture
def benchmark_weave(benchmark):
return benchmark.weave

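The benchmark fixture now tears down via the yield form instead of request.addfinalizer, and the explicit scope='function' is dropped because function scope is already the default. A minimal sketch of the same pattern, with a made-up resource fixture:

```python
import pytest


@pytest.fixture  # function scope is the default, so scope='function' adds nothing
def resource():
    handle = {'open': True}
    yield handle              # the test runs while the fixture is suspended here
    handle['open'] = False    # teardown code replaces request.addfinalizer(...)


def test_resource(resource):
    assert resource['open']
```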
28 changes: 15 additions & 13 deletions src/pytest_benchmark/session.py
@@ -43,17 +43,17 @@ def __init__(self, config):
default_machine_id=self.machine_id,
netrc=config.getoption('benchmark_netrc'),
)
self.options = dict(
min_time=SecondsDecimal(config.getoption('benchmark_min_time')),
min_rounds=config.getoption('benchmark_min_rounds'),
max_time=SecondsDecimal(config.getoption('benchmark_max_time')),
timer=load_timer(config.getoption('benchmark_timer')),
calibration_precision=config.getoption('benchmark_calibration_precision'),
disable_gc=config.getoption('benchmark_disable_gc'),
warmup=config.getoption('benchmark_warmup'),
warmup_iterations=config.getoption('benchmark_warmup_iterations'),
cprofile=bool(config.getoption('benchmark_cprofile')),
)
self.options = {
'min_time': SecondsDecimal(config.getoption('benchmark_min_time')),
'min_rounds': config.getoption('benchmark_min_rounds'),
'max_time': SecondsDecimal(config.getoption('benchmark_max_time')),
'timer': load_timer(config.getoption('benchmark_timer')),
'calibration_precision': config.getoption('benchmark_calibration_precision'),
'disable_gc': config.getoption('benchmark_disable_gc'),
'warmup': config.getoption('benchmark_warmup'),
'warmup_iterations': config.getoption('benchmark_warmup_iterations'),
'cprofile': bool(config.getoption('benchmark_cprofile')),
}
self.skip = config.getoption('benchmark_skip')
self.disabled = config.getoption('benchmark_disable') and not config.getoption('benchmark_enable')
self.cprofile_sort_by = config.getoption('benchmark_cprofile')
@@ -191,7 +191,7 @@ def handle_loading(self):
machine_info=machine_info,
compared_benchmark=compared_benchmark,
)
compared_mapping[path] = dict((bench['fullname'], bench) for bench in compared_benchmark['benchmarks'])
compared_mapping[path] = {bench['fullname']: bench for bench in compared_benchmark['benchmarks']}
self.logger.info('Comparing against benchmarks from: %s' % path, newline=False)
self.compared_mapping = compared_mapping

@@ -228,7 +228,9 @@ def check_regressions(self):
raise pytest.UsageError('--benchmark-compare-fail requires valid --benchmark-compare.')

if self.performance_regressions:
self.logger.error('Performance has regressed:\n%s' % '\n'.join('\t%s - %s' % line for line in self.performance_regressions))
self.logger.error(
'Performance has regressed:\n%s' % '\n'.join('\t{} - {}'.format(*line) for line in self.performance_regressions)
)
raise PerformanceRegression('Performance has regressed.')

def display_cprofile(self, tr):
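The options block switches from a dict(key=value, ...) call to a dict literal; both build the same mapping, the literal is simply the form ruff's fixes prefer. A quick check with invented option names:

```python
options_call = dict(min_rounds=5, warmup=False)
options_literal = {'min_rounds': 5, 'warmup': False}

assert options_call == options_literal
```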
6 changes: 3 additions & 3 deletions src/pytest_benchmark/stats.py
@@ -38,7 +38,7 @@ def __nonzero__(self):
return bool(self.data)

def as_dict(self):
return dict((field, getattr(self, field)) for field in self.fields)
return {field: getattr(self, field) for field in self.fields}

def update(self, duration):
self.data.append(duration)
@@ -168,7 +168,7 @@ def iqr_outliers(self):

@cached_property
def outliers(self):
return '%s;%s' % (self.stddev_outliers, self.iqr_outliers)
return f'{self.stddev_outliers};{self.iqr_outliers}'

@cached_property
def ops(self):
@@ -222,7 +222,7 @@ def as_dict(self, include_data=True, flat=False, stats=True, cprofile=None):
'params': self.params,
'param': self.param,
'extra_info': self.extra_info,
'options': dict((k, funcname(v) if callable(v) else v) for k, v in self.options.items()),
'options': {k: funcname(v) if callable(v) else v for k, v in self.options.items()},
}
if self.cprofile_stats:
cprofile_list = result['cprofile'] = []
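Here dict() over a generator of pairs becomes a dict comprehension, mirroring the set-comprehension change in csv.py. An isolated version of the rewrite, with invented field names:

```python
fields = ('min', 'max', 'mean')
source = {'min': 1, 'max': 9, 'mean': 4, 'rounds': 7}

# Before: dict() over a generator of (key, value) pairs
old = dict((field, source[field]) for field in fields)

# After: the dict comprehension ruff produces
new = {field: source[field] for field in fields}

assert old == new
```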
12 changes: 6 additions & 6 deletions src/pytest_benchmark/storage/elasticsearch.py
@@ -10,8 +10,8 @@
try:
import elasticsearch
from elasticsearch.serializer import JSONSerializer
except ImportError:
raise ImportError('Please install elasticsearch or pytest-benchmark[elasticsearch]')
except ImportError as exc:
raise ImportError('Please install elasticsearch or pytest-benchmark[elasticsearch]') from exc


class BenchmarkJSONSerializer(JSONSerializer):
@@ -66,8 +66,8 @@ def load(self, id_prefix=None):
"""
r = self._search(self._project_name, id_prefix)
groupped_data = self._group_by_commit_and_time(r['hits']['hits'])
result = [(key, value) for key, value in groupped_data.items()]
result.sort(key=lambda x: datetime.strptime(x[1]['datetime'], '%Y-%m-%dT%H:%M:%S.%f'))
result = list(groupped_data.items())
result.sort(key=lambda x: datetime.strptime(x[1]['datetime'], '%Y-%m-%dT%H:%M:%S.%f')) # noqa: DTZ007
for key, data in result:
for bench in data['benchmarks']:
normalize_stats(bench['stats'])
@@ -102,7 +102,7 @@ def _group_by_commit_and_time(self, hits):
result = {}
for hit in hits:
source_hit = hit['_source']
key = '%s_%s' % (source_hit['commit_info']['id'], source_hit['datetime'])
key = '{}_{}'.format(source_hit['commit_info']['id'], source_hit['datetime'])
benchmark = self._benchmark_from_es_record(source_hit)
if key in result:
result[key]['benchmarks'].append(benchmark)
@@ -143,7 +143,7 @@ def save(self, output_json, save):
)
# hide user's credentials before logging
masked_hosts = _mask_hosts(self._es_hosts)
self.logger.info('Saved benchmark data to %s to index %s as doctype %s' % (masked_hosts, self._es_index, self._es_doctype))
self.logger.info(f'Saved benchmark data to {masked_hosts} to index {self._es_index} as doctype {self._es_doctype}')

def _create_index(self):
mapping = {
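Binding the caught exception and re-raising with `from exc` keeps the original ImportError attached as __cause__, so tracebacks show the real cause instead of "another exception occurred while handling". A standalone sketch using a made-up module name:

```python
def load_optional_dependency():
    try:
        import a_module_that_is_not_installed  # hypothetical optional dependency
    except ImportError as exc:
        # `from exc` chains the original error as __cause__
        raise ImportError('Please install the optional extra') from exc


try:
    load_optional_dependency()
except ImportError as err:
    print(err, '| caused by:', err.__cause__)
```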
4 changes: 2 additions & 2 deletions src/pytest_benchmark/storage/file.py
@@ -1,9 +1,9 @@
import json
import os
from os.path import commonpath

from ..stats import normalize_stats
from ..utils import Path
from ..utils import commonpath
from ..utils import safe_dumps
from ..utils import short_filename

@@ -48,7 +48,7 @@ def _next_num(self):
raise

def save(self, output_json, save):
output_file = self.get('%s_%s.json' % (self._next_num, save))
output_file = self.get(f'{self._next_num}_{save}.json')
assert not output_file.exists()
with output_file.open('wb') as fh:
fh.write(safe_dumps(output_json, ensure_ascii=True, indent=4).encode())
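This is the "compat commonpath" removal from the commit message: the helper previously imported from ..utils is replaced by os.path.commonpath, which has been in the standard library since Python 3.4. Usage for reference:

```python
from os.path import commonpath

# Longest common sub-path of the given paths
print(commonpath(['/data/benchmarks/0001_run.json', '/data/benchmarks/0002_run.json']))
# -> '/data/benchmarks' (on POSIX)
```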
16 changes: 9 additions & 7 deletions src/pytest_benchmark/table.py
@@ -29,16 +29,18 @@ def display(self, tr, groups, progress_reporter=report_progress):
worst = {}
best = {}
solo = len(benchmarks) == 1
for line, prop in progress_reporter(('min', 'max', 'mean', 'median', 'iqr', 'stddev', 'ops'), tr, '{line}: {value}', line=line):
for line1, prop in progress_reporter(
('min', 'max', 'mean', 'median', 'iqr', 'stddev', 'ops'), tr, '{line}: {value}', line=line
):
if prop == 'ops':
worst[prop] = min(bench[prop] for _, bench in progress_reporter(benchmarks, tr, '{line} ({pos}/{total})', line=line))
best[prop] = max(bench[prop] for _, bench in progress_reporter(benchmarks, tr, '{line} ({pos}/{total})', line=line))
worst[prop] = min(bench[prop] for _, bench in progress_reporter(benchmarks, tr, '{line} ({pos}/{total})', line=line1))
best[prop] = max(bench[prop] for _, bench in progress_reporter(benchmarks, tr, '{line} ({pos}/{total})', line=line1))
else:
worst[prop] = max(bench[prop] for _, bench in progress_reporter(benchmarks, tr, '{line} ({pos}/{total})', line=line))
best[prop] = min(bench[prop] for _, bench in progress_reporter(benchmarks, tr, '{line} ({pos}/{total})', line=line))
for line, prop in progress_reporter(('outliers', 'rounds', 'iterations'), tr, '{line}: {value}', line=line):
worst[prop] = max(bench[prop] for _, bench in progress_reporter(benchmarks, tr, '{line} ({pos}/{total})', line=line1))
best[prop] = min(bench[prop] for _, bench in progress_reporter(benchmarks, tr, '{line} ({pos}/{total})', line=line1))
for line1, prop in progress_reporter(('outliers', 'rounds', 'iterations'), tr, '{line}: {value}', line=line):
worst[prop] = max(
benchmark[prop] for _, benchmark in progress_reporter(benchmarks, tr, '{line} ({pos}/{total})', line=line)
benchmark[prop] for _, benchmark in progress_reporter(benchmarks, tr, '{line} ({pos}/{total})', line=line1)
)

unit, adjustment = self.scale_unit(unit='seconds', benchmarks=benchmarks, best=best, worst=worst, sort=self.sort)
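The loop target is renamed to line1 so it no longer rebinds the line name that the same statement passes as a keyword argument; later line=line uses then keep seeing the original value instead of whatever the previous loop left behind. A reduced sketch of the effect, where the reporter below is a stand-in rather than the real progress_reporter:

```python
def reporter(items, line):
    # stand-in generator: yields (line, item) pairs like progress_reporter does
    for item in items:
        yield f'{line}:{item}', item


line = 'status'
seen = []
for line1, prop in reporter(['min', 'max'], line=line):  # loop target no longer shadows `line`
    seen.append((line1, prop))

assert line == 'status'                 # untouched by the loop after the rename
assert seen == [('status:min', 'min'), ('status:max', 'max')]
```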