Add summary diagnostics to spec test gen #2926

Merged
merged 1 commit on Jul 15, 2022
setup.py (2 changes: 1 addition & 1 deletion)
@@ -1041,7 +1041,7 @@ def run(self):
extras_require={
"test": ["pytest>=4.4", "pytest-cov", "pytest-xdist"],
"lint": ["flake8==3.7.7", "mypy==0.812", "pylint==2.12.2"],
"generator": ["python-snappy==0.5.4"],
"generator": ["python-snappy==0.5.4", "filelock"],
},
install_requires=[
"eth-utils>=1.3.0,<2",
tests/core/pyspec/eth2spec/gen_helpers/gen_base/gen_runner.py (43 changes: 38 additions & 5 deletions)
@@ -3,7 +3,9 @@
import shutil
import argparse
from pathlib import Path
+ from filelock import FileLock
import sys
+ import json
from typing import Iterable, AnyStr, Any, Callable
import traceback

@@ -111,6 +113,8 @@ def run_generator(generator_name, test_providers: Iterable[TestProvider]):
collected_test_count = 0
generated_test_count = 0
skipped_test_count = 0
+ test_identifiers = []

provider_start = time.time()
for tprov in test_providers:
if not collect_only:
@@ -123,12 +127,10 @@ def run_generator(generator_name, test_providers: Iterable[TestProvider]):
/ Path(test_case.runner_name) / Path(test_case.handler_name)
/ Path(test_case.suite_name) / Path(test_case.case_name)
)
- incomplete_tag_file = case_dir / "INCOMPLETE"
-
collected_test_count += 1
- if collect_only:
- print(f"Collected test at: {case_dir}")
- continue
+ print(f"Collected test at: {case_dir}")
+
+ incomplete_tag_file = case_dir / "INCOMPLETE"

if case_dir.exists():
if not args.force and not incomplete_tag_file.exists():
@@ -198,6 +200,15 @@ def output_part(out_kind: str, name: str, fn: Callable[[Path, ], None]):
shutil.rmtree(case_dir)
else:
generated_test_count += 1
test_identifier = "::".join([
test_case.preset_name,
test_case.fork_name,
test_case.runner_name,
test_case.handler_name,
test_case.suite_name,
test_case.case_name
])
test_identifiers.append(test_identifier)
# Only remove `INCOMPLETE` tag file
os.remove(incomplete_tag_file)
test_end = time.time()
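For context, the identifier built above is simply the test case's coordinates joined with "::". A sketch with made-up values (these preset/fork/runner/handler/suite/case names are hypothetical, not taken from this PR):

    # Illustrative only -- the values below are hypothetical; they just show the
    # shape of the "::"-joined identifier recorded per generated test.
    test_identifier = "::".join([
        "mainnet",                         # test_case.preset_name
        "altair",                          # test_case.fork_name
        "epoch_processing",                # test_case.runner_name
        "justification_and_finalization",  # test_case.handler_name
        "pyspec_tests",                    # test_case.suite_name
        "full_justification",              # test_case.case_name
    ])
    # -> "mainnet::altair::epoch_processing::justification_and_finalization::pyspec_tests::full_justification"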
@@ -216,6 +227,28 @@ def output_part(out_kind: str, name: str, fn: Callable[[Path, ], None]):
if span > TIME_THRESHOLD_TO_PRINT:
summary_message += f" in {span} seconds"
print(summary_message)
+ diagnostics = {
Contributor

should we have an assertion to verify if generated_test_count + test_identifiers == collected_test_count?

Member Author

something like this doesn't hurt but I am also not convinced these assertions hold -- in particular I was considering this for the skipped test count and essentially collected == generated + skipped did NOT hold... haven't looked more into it beyond that

"collected_test_count": collected_test_count,
"generated_test_count": generated_test_count,
"skipped_test_count": skipped_test_count,
"test_identifiers": test_identifiers,
"durations": [f"{span} seconds"],
+ }
+ diagnostics_path = Path(os.path.join(output_dir, "diagnostics.json"))
+ diagnostics_lock = FileLock(os.path.join(output_dir, "diagnostics.json.lock"))
+ with diagnostics_lock:
+ diagnostics_path.touch(exist_ok=True)
+ if os.path.getsize(diagnostics_path) == 0:
+ with open(diagnostics_path, "w+") as f:
+ json.dump(diagnostics, f)
+ else:
+ with open(diagnostics_path, "r+") as f:
+ existing_diagnostics = json.load(f)
+ for k, v in diagnostics.items():
+ existing_diagnostics[k] += v
+ with open(diagnostics_path, "w+") as f:
+ json.dump(existing_diagnostics, f)
+ print(f"wrote diagnostics to {diagnostics_path}")


def dump_yaml_fn(data: Any, name: str, file_mode: str, yaml_encoder: YAML):
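To illustrate the merge semantics of the diagnostics block above: the first generator run creates diagnostics.json, and every later run loads it and applies += per key, which sums the integer counters and concatenates the list-valued entries. A rough sketch of two runs being merged, with made-up counts and identifiers:

    import json

    # Hypothetical diagnostics from two separate generator runs (values made up).
    run_1 = {
        "collected_test_count": 10,
        "generated_test_count": 9,
        "skipped_test_count": 1,
        "test_identifiers": ["minimal::altair::sanity::blocks::pyspec_tests::case_a"],
        "durations": ["12.3 seconds"],
    }
    run_2 = {
        "collected_test_count": 4,
        "generated_test_count": 4,
        "skipped_test_count": 0,
        "test_identifiers": ["minimal::altair::sanity::slots::pyspec_tests::case_b"],
        "durations": ["3.4 seconds"],
    }

    # Same merge as the diff above: '+=' sums integer counters and concatenates lists.
    existing_diagnostics = run_1  # stands in for json.load() of an existing diagnostics.json
    for k, v in run_2.items():
        existing_diagnostics[k] += v

    print(json.dumps(existing_diagnostics, indent=2))
    # collected_test_count is now 14; test_identifiers and durations hold both runs' entries.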
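Regarding the review thread above: the invariant the contributor suggested asserting would look roughly like the sketch below. It is not part of this PR, and the author notes that collected == generated + skipped did not actually hold in practice, so treat this only as a statement of the intended relationship between the new counters.

    # Hypothetical consistency check discussed in the review thread (NOT added by this PR).
    def check_diagnostics(diagnostics: dict) -> None:
        collected = diagnostics["collected_test_count"]
        generated = diagnostics["generated_test_count"]
        skipped = diagnostics["skipped_test_count"]
        identifiers = diagnostics["test_identifiers"]

        # Every collected test should end up either generated or skipped;
        # the author observed this did not hold, which is why it was left out.
        assert collected == generated + skipped, f"{collected=} {generated=} {skipped=}"
        # One identifier is recorded per generated test.
        assert len(identifiers) == generated, f"{len(identifiers)=} {generated=}"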