Skip to content

Commit

Permalink
Merge pull request #131 from asmeurer/json-reporting
Browse files Browse the repository at this point in the history
JSON reporting
  • Loading branch information
asmeurer authored Aug 19, 2022
2 parents 4d9d7b4 + 0841ef3 commit 66ab89c
Show file tree
Hide file tree
Showing 6 changed files with 157 additions and 2 deletions.
3 changes: 3 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -127,3 +127,6 @@ dmypy.json

# Pyre type checker
.pyre/

# pytest-json-report
.report.json
5 changes: 5 additions & 0 deletions array_api_tests/stubs.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,7 @@
__all__ = [
"name_to_func",
"array_methods",
"array_attributes",
"category_to_funcs",
"EXTENSIONS",
"extension_to_funcs",
Expand All @@ -34,6 +35,10 @@
f for n, f in inspect.getmembers(array, predicate=inspect.isfunction)
if n != "__init__" # probably exists for Sphinx
]
# Names of the array object's non-function members (attributes/properties).
# NOTE(review): the "not a function" predicate also admits dunder members
# such as __doc__ — confirm that is intended.
array_attributes = [
    attr_name
    for attr_name, member in inspect.getmembers(
        array, predicate=lambda member: not inspect.isfunction(member)
    )
    if attr_name != "__init__"  # probably exists for Sphinx
]

category_to_funcs: Dict[str, List[FunctionType]] = {}
for name, mod in name_to_mod.items():
Expand Down
37 changes: 37 additions & 0 deletions array_api_tests/test_has_names.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,37 @@
"""
This is a very basic test to see what names are defined in a library. It
does not even require functioning hypothesis array_api support.
"""

import pytest

from ._array_module import mod as xp, mod_name
from .stubs import (array_attributes, array_methods, category_to_funcs,
extension_to_funcs, EXTENSIONS)

# Build one (category, name) parameter pair per name the spec requires:
# extension functions, top-level functions by category, array methods, and
# array attributes — in that order.
has_name_params = []
for extension, funcs in extension_to_funcs.items():
    has_name_params.extend(pytest.param(extension, func.__name__) for func in funcs)
for category, funcs in category_to_funcs.items():
    has_name_params.extend(pytest.param(category, func.__name__) for func in funcs)
has_name_params.extend(
    pytest.param('array_method', method.__name__) for method in array_methods
)
has_name_params.extend(
    pytest.param('array_attribute', attr) for attr in array_attributes
)

@pytest.mark.parametrize("category, name", has_name_params)
def test_has_names(category, name):
    """Check that the library exposes *name*, looked up per *category*.

    Extension functions are looked up on the extension submodule, array
    methods/attributes on a fresh array object, everything else on the
    top-level namespace.
    """
    if category in EXTENSIONS:
        extension_module = getattr(xp, category)
        assert hasattr(extension_module, name), f"{mod_name} is missing the {category} extension function {name}()"
        return
    if category.startswith('array_'):
        # TODO: This would fail if ones() is missing.
        array_obj = xp.ones((1, 1))
        if category == 'array_attribute':
            message = f"The {mod_name} array object is missing the attribute {name}"
        else:
            message = f"The {mod_name} array object is missing the method {name}()"
        assert hasattr(array_obj, name), message
        return
    assert hasattr(xp, name), f"{mod_name} is missing the {category} function {name}()"
5 changes: 3 additions & 2 deletions conftest.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,8 +7,9 @@
from array_api_tests import _array_module as xp
from array_api_tests._array_module import _UndefinedStub

settings.register_profile("xp_default", deadline=800)
from reporting import pytest_metadata, pytest_json_modifyreport, add_extra_json_metadata # noqa

settings.register_profile("xp_default", deadline=800)

def pytest_addoption(parser):
# Hypothesis max examples
Expand Down Expand Up @@ -120,7 +121,7 @@ def pytest_collection_modifyitems(config, items):
mark.skip(reason="disabled via --disable-data-dependent-shapes")
)
break
# skip if test not appropiate for CI
# skip if test not appropriate for CI
if ci:
ci_mark = next((m for m in markers if m.name == "ci"), None)
if ci_mark is None:
Expand Down
108 changes: 108 additions & 0 deletions reporting.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,108 @@
from array_api_tests.dtype_helpers import dtype_to_name
from array_api_tests import _array_module as xp
from array_api_tests import __version__

from collections import Counter
from types import BuiltinFunctionType, FunctionType
import dataclasses
import json
import warnings

from hypothesis.strategies import SearchStrategy

from pytest import mark, fixture
try:
import pytest_jsonreport # noqa
except ImportError:
raise ImportError("pytest-json-report is required to run the array API tests")

def to_json_serializable(o):
    """
    Recursively convert *o* into a structure that ``json.dumps`` accepts.

    Conversions, in order of precedence:

    - array API dtypes (keys of ``dtype_to_name``) -> their string name
    - functions, builtins, and classes -> their ``__name__`` (classes are
      handled here, before the dataclass branch, so ``dataclasses.asdict``
      is only ever applied to dataclass *instances*)
    - dataclass instances -> dict of their fields, converted recursively
    - hypothesis ``SearchStrategy`` -> its ``repr``
    - dicts, namedtuples, tuples, lists -> converted element-wise
    - anything already JSON-serializable -> returned unchanged
    - anything else -> its ``repr``, with a warning so the type can be
      added above
    """
    # The membership test raises TypeError for unhashable objects (dicts,
    # lists, ...) when the mapping hashes its keys; such objects are never
    # dtypes, so treat that as "not a dtype" instead of crashing.
    # NOTE(review): if dtype_to_name is an equality-based mapping this guard
    # is a no-op — confirm against dtype_helpers.
    try:
        is_dtype = o in dtype_to_name
    except TypeError:
        is_dtype = False
    if is_dtype:
        return dtype_to_name[o]
    if isinstance(o, (BuiltinFunctionType, FunctionType, type)):
        return o.__name__
    if dataclasses.is_dataclass(o):
        return to_json_serializable(dataclasses.asdict(o))
    if isinstance(o, SearchStrategy):
        return repr(o)
    if isinstance(o, dict):
        return {to_json_serializable(k): to_json_serializable(v) for k, v in o.items()}
    if isinstance(o, tuple):
        if hasattr(o, '_asdict'):  # namedtuple
            return to_json_serializable(o._asdict())
        return tuple(to_json_serializable(i) for i in o)
    if isinstance(o, list):
        return [to_json_serializable(i) for i in o]

    # Ensure everything is JSON serializable. If this warning is issued, it
    # means the given type needs to be added above if possible.
    try:
        json.dumps(o)
    except TypeError:
        warnings.warn(f"{o!r} (of type {type(o)}) is not JSON-serializable. Using the repr instead.")
        return repr(o)

    return o

@mark.optionalhook
def pytest_metadata(metadata):
    """
    Add global array-api-tests metadata (module under test and test-suite
    version) to the --json-report output.
    """
    extra = {
        'array_api_tests_module': xp.mod_name,
        'array_api_tests_version': __version__,
    }
    for key, value in extra.items():
        metadata[key] = value

@fixture(autouse=True)
def add_extra_json_metadata(request, json_metadata):
    """
    Attach per-test metadata (module, function, spec function name, and
    hypothesis parameters/statistics) for --json-report.
    """
    def record(key, value):
        # Everything stored must survive json.dumps.
        json_metadata[key] = to_json_serializable(value)

    module_name = request.module.__name__
    # The suite's own meta-tests are not about the array library; skip them.
    if module_name.startswith('array_api_tests.meta'):
        return

    function_name = request.function.__name__
    assert function_name.startswith('test_'), 'unexpected test function name'

    # test_foo conventionally tests the spec function foo(); test_has_names
    # is the exception and covers many names at once.
    if module_name == 'array_api_tests.test_has_names':
        api_function_name = None
    else:
        api_function_name = function_name[len('test_'):]

    record('test_module', module_name)
    record('test_function', function_name)
    record('array_api_function_name', api_function_name)

    callspec = getattr(request.node, 'callspec', None)
    if callspec is not None:
        record('params', callspec.params)

    def finalizer():
        # TODO: This metadata is all in the form of error strings. It might be
        # nice to extract the hypothesis failing inputs directly somehow.
        for attr in ('hypothesis_report_information', 'hypothesis_statistics'):
            if hasattr(request.node, attr):
                record(attr, getattr(request.node, attr))

    request.addfinalizer(finalizer)

def pytest_json_modifyreport(json_report):
    """Deduplicate the warnings list in the --json-report output.

    Duplicate warnings can cause the file size to become huge. For instance,
    a warning from np.bool which is emitted every time hypothesis runs (over
    a million times) causes the warnings JSON for a plain numpy namespace run
    to be over 500MB.

    This loses the order the warnings were issued in, but that isn't
    particularly helpful anyway since the warning metadata doesn't store a
    full stack of where it was issued from. Each distinct warning is kept at
    the position of its first occurrence, with a 'count' of how many times it
    appeared.
    """
    occurrence_counts = {}
    for warning in json_report['warnings']:
        # frozenset of items gives a hashable, order-insensitive identity.
        key = frozenset(warning.items())
        occurrence_counts[key] = occurrence_counts.get(key, 0) + 1

    json_report['warnings'] = [
        {**dict(key), 'count': count}
        for key, count in occurrence_counts.items()
    ]
1 change: 1 addition & 0 deletions requirements.txt
Original file line number Diff line number Diff line change
@@ -1,3 +1,4 @@
pytest
pytest-json-report
hypothesis>=6.45.0
ndindex>=1.6

0 comments on commit 66ab89c

Please sign in to comment.