-
Notifications
You must be signed in to change notification settings - Fork 5
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
- Loading branch information
Showing
9 changed files
with
1,941 additions
and
13 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,146 @@ | ||
# Copyright 2021-2023 Kolena Inc. | ||
# | ||
# Licensed under the Apache License, Version 2.0 (the "License"); | ||
# you may not use this file except in compliance with the License. | ||
# You may obtain a copy of the License at | ||
# | ||
# http://www.apache.org/licenses/LICENSE-2.0 | ||
# | ||
# Unless required by applicable law or agreed to in writing, software | ||
# distributed under the License is distributed on an "AS IS" BASIS, | ||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
# See the License for the specific language governing permissions and | ||
# limitations under the License. | ||
from dataclasses import dataclass | ||
from typing import List | ||
|
||
import pytest | ||
|
||
from kolena.detection import Model | ||
from kolena.detection import TestCase | ||
from kolena.detection import TestImage | ||
from kolena.detection import TestSuite | ||
from kolena.detection.ground_truth import BoundingBox | ||
from kolena.detection.ground_truth import ClassificationLabel | ||
from kolena.detection.ground_truth import SegmentationMask | ||
from tests.integration.helper import fake_locator | ||
from tests.integration.helper import with_test_prefix | ||
|
||
|
||
@dataclass(frozen=True)
class TestData:
    """Immutable bundle of pre-seeded detection entities shared across integration tests."""

    # All created test cases, including the pre- and post-``reset`` versions of "A" and "B".
    test_cases: List[TestCase]
    # All created test suites, including the pre- and post-``reset`` versions of suite "A".
    test_suites: List[TestSuite]
    # Models registered for exercising the suites above.
    models: List[Model]
    # Image locators used by the fixture's test images, in creation order.
    locators: List[str]
|
||
|
||
@pytest.fixture(scope="session")
def detection_test_data() -> TestData:
    """Seed detection test cases, test suites, and models once per test session.

    Builds test cases "A" and "B", then rebuilds each under the same name with
    ``reset=True`` to exercise the overwrite path, assembles several test suites
    from them, and registers two models.

    NOTE(review): constructing ``TestCase``/``TestSuite`` here presumably
    registers entities with the Kolena backend (this is an integration fixture),
    so statement order matters — the non-reset versions must be created before
    their ``reset=True`` counterparts. Confirm against the SDK before reordering.
    """
    # A spread of ground-truth types (classification, bounding box, segmentation
    # mask); referenced by index when attaching to test images below.
    ground_truths = [
        ClassificationLabel("car"),
        ClassificationLabel("bike"),
        BoundingBox("boat", top_left=(0.0, 1.5), bottom_right=(0.3, 3.4)),
        SegmentationMask("van", [(4.0, 1.5), (0.9, 3.4), (19.5, 17.6), (8, 8)]),
        BoundingBox("boat", top_left=(50, 60), bottom_right=(60, 100)),
        BoundingBox("pedestrian", top_left=(120, 70), bottom_right=(190, 100)),
        SegmentationMask("truck", [(0, 15), (0.9, 3.4), (19.5, 17.6), (0, 15)]),
        SegmentationMask("airplane", [(4.0, 1.5), (0.9, 3.4), (19.5, 17.6), (8, 8)]),
    ]
    dataset = with_test_prefix("fake-data-set")
    # Five fake images as (locator, metadata) pairs.
    images = [(fake_locator(i, "detection/base"), {"example": "metadata", "i": i}) for i in range(5)]

    test_case_a = TestCase(
        with_test_prefix("A"),
        description="filler",
        images=[
            TestImage(locator=images[0][0], dataset=dataset, metadata=images[0][1], ground_truths=[ground_truths[0]]),
            TestImage(locator=images[1][0], dataset=dataset, metadata=images[1][1]),
        ],
    )
    # Same name as test_case_a; reset=True replaces its description and images.
    test_case_a_updated = TestCase(
        with_test_prefix("A"),
        description="description",
        images=[
            TestImage(locator=images[0][0], dataset=dataset, metadata=images[0][1], ground_truths=[ground_truths[0]]),
            TestImage(locator=images[1][0], dataset=dataset, metadata=images[1][1]),
            TestImage(locator=images[2][0], dataset=dataset, metadata=images[2][1], ground_truths=[ground_truths[2]]),
            TestImage(locator=images[4][0], dataset=dataset, metadata=images[4][1]),
        ],
        reset=True,
    )
    test_case_b = TestCase(
        with_test_prefix("B"),
        description="fields",
        images=[
            TestImage(
                locator=images[2][0],
                dataset=dataset,
                metadata=images[2][1],
                ground_truths=[ground_truths[1], ground_truths[2]],
            ),
            TestImage(locator=images[3][0], dataset=dataset, metadata=images[3][1], ground_truths=[ground_truths[4]]),
        ],
    )
    # Same name as test_case_b; reset=True replaces its description and images.
    test_case_b_updated = TestCase(
        with_test_prefix("B"),
        description="etc",
        images=[
            TestImage(locator=images[1][0], dataset=dataset, metadata=images[1][1]),
            TestImage(
                locator=images[2][0],
                dataset=dataset,
                metadata=images[2][1],
                ground_truths=[
                    ground_truths[2],
                    ground_truths[3],
                ],
            ),
            TestImage(
                locator=images[3][0],
                dataset=dataset,
                metadata=images[3][1],
                ground_truths=[
                    ground_truths[5],
                    ground_truths[7],
                ],
            ),
        ],
        reset=True,
    )
    # Distinct name: a single-image subset of "B"'s images.
    test_case_b_subset = TestCase(
        with_test_prefix("B_subset"),
        description="and more!",
        images=[
            TestImage(locator=images[3][0], dataset=dataset, metadata=images[3][1], ground_truths=[ground_truths[6]]),
        ],
    )

    test_cases = [test_case_a, test_case_a_updated, test_case_b, test_case_b_updated, test_case_b_subset]

    test_suite_name_a = with_test_prefix("A")
    test_suite_a = TestSuite(test_suite_name_a, description="filler", test_cases=[test_case_a, test_case_b])
    # Same name as test_suite_a; reset=True replaces its description and test cases.
    test_suite_a_updated = TestSuite(
        test_suite_name_a,
        description="description",
        test_cases=[test_case_a_updated, test_case_b],
        reset=True,
    )
    test_suite_b = TestSuite(with_test_prefix("B"), description="fields", test_cases=[test_case_b_updated])
    test_suite_a_subset = TestSuite(
        with_test_prefix("A_subset"),
        description="etc",
        test_cases=[test_case_b_subset],
    )

    test_suites = [test_suite_a, test_suite_a_updated, test_suite_b, test_suite_a_subset]

    models = [
        Model(with_test_prefix("a"), metadata={"some": "metadata"}),
        Model(with_test_prefix("b"), metadata={"one": 1, "false": False}),
    ]

    return TestData(test_cases=test_cases, test_suites=test_suites, models=models, locators=[img[0] for img in images])
|
||
|
||
# Enable pytest's assertion rewriting for the shared detection helper module so
# its bare `assert` statements produce rich failure diffs in test output.
pytest.register_assert_rewrite("tests.integration.detection.helper")
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,103 @@ | ||
# Copyright 2021-2023 Kolena Inc. | ||
# | ||
# Licensed under the Apache License, Version 2.0 (the "License"); | ||
# you may not use this file except in compliance with the License. | ||
# You may obtain a copy of the License at | ||
# | ||
# http://www.apache.org/licenses/LICENSE-2.0 | ||
# | ||
# Unless required by applicable law or agreed to in writing, software | ||
# distributed under the License is distributed on an "AS IS" BASIS, | ||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
# See the License for the specific language governing permissions and | ||
# limitations under the License. | ||
import json | ||
import random | ||
from typing import List | ||
from typing import Tuple | ||
|
||
from kolena.detection import ground_truth | ||
from kolena.detection import inference | ||
from kolena.detection import TestImage | ||
|
||
# Pool of label names that the fake_* factories below sample from.
fake_labels = (
    "car bike house airplane boat bus animal person cow cat dog parakeet "
    "weasel rabbit mouse rat anteater aardvark whale seal walrus butterfly "
    "hawk pigeon goose"
).split()
|
||
|
||
def fake_label() -> str:
    """Return one label drawn uniformly at random from ``fake_labels``."""
    labels = fake_labels
    return labels[random.randrange(len(labels))]
|
||
|
||
def fake_points(n: int) -> List[Tuple[float, float]]:
    """Generate ``n`` random (x, y) points, rounded to 3 decimal places.

    x is drawn from [0, 300) and y from [0, 1) — matching the image-like
    coordinate ranges used by the ground-truth factories below.
    """
    points: List[Tuple[float, float]] = []
    for _ in range(n):
        # Draw x before y so the RNG stream matches per-point construction order.
        x = round(random.random() * 300, 3)
        y = round(random.random(), 3)
        points.append((x, y))
    return points
|
||
|
||
def fake_gt_classification_label() -> ground_truth.ClassificationLabel:
    """Build a classification-label ground truth with a random label."""
    label = fake_label()
    return ground_truth.ClassificationLabel(label)
|
||
|
||
def fake_gt_bounding_box() -> ground_truth.BoundingBox:
    """Build a bounding-box ground truth with a random label and random corners."""
    # Label first, then corners: preserves the original RNG consumption order.
    label = fake_label()
    top_left, bottom_right = fake_points(2)
    return ground_truth.BoundingBox(label, top_left, bottom_right)
|
||
|
||
def fake_gt_segmentation_mask() -> ground_truth.SegmentationMask:
    """Build a segmentation-mask ground truth with 3-15 random vertices."""
    # Label, then vertex count, then points: preserves the original RNG order.
    label = fake_label()
    n_vertices = random.randint(3, 15)
    return ground_truth.SegmentationMask(label, fake_points(n_vertices))
|
||
|
||
def fake_confidence() -> float:
    """Return a random confidence score in [0, 1), rounded to 3 decimal places."""
    score = random.random()
    return round(score, 3)
|
||
|
||
def fake_inference_classification_label() -> inference.ClassificationLabel:
    """Build a classification-label inference with a random label and confidence."""
    # Label before confidence: preserves the original RNG consumption order.
    label = fake_label()
    confidence = fake_confidence()
    return inference.ClassificationLabel(label, confidence)
|
||
|
||
def fake_inference_bounding_box() -> inference.BoundingBox:
    """Build a bounding-box inference with random label, confidence, and corners."""
    # Label, then confidence, then corners: preserves the original RNG order.
    label = fake_label()
    confidence = fake_confidence()
    top_left, bottom_right = fake_points(2)
    return inference.BoundingBox(label, confidence, top_left, bottom_right)
|
||
|
||
def fake_inference_segmentation_mask() -> inference.SegmentationMask:
    """Build a segmentation-mask inference with 3-15 random vertices."""
    # Label, then confidence, then vertex count/points: preserves RNG order.
    label = fake_label()
    confidence = fake_confidence()
    n_vertices = random.randint(3, 15)
    return inference.SegmentationMask(label, confidence, fake_points(n_vertices))
|
||
|
||
def assert_test_image_equal(a: TestImage, b: TestImage) -> None:
    """Assert two TestImages match on locator, dataset, metadata, and ground truths.

    Ground-truth lists are compared order-insensitively by sorting on a stable
    JSON serialization of each ground truth.
    """

    def canonical(gt) -> str:
        # Stable, order-independent key for a single ground truth.
        return json.dumps(gt._to_dict(), sort_keys=True)

    assert a.locator == b.locator
    assert a.dataset == b.dataset
    assert a.metadata == b.metadata
    assert sorted(a.ground_truths, key=canonical) == sorted(b.ground_truths, key=canonical)
|
||
|
||
def assert_test_images_equal(actual: List[TestImage], expected: List[TestImage]) -> None:
    """Assert two lists of TestImages are equal, ignoring list order.

    Images are paired up by sorting both lists on locator, then compared
    field-by-field via ``assert_test_image_equal``.
    """
    assert len(actual) == len(expected)
    paired = zip(
        sorted(actual, key=lambda img: img.locator),
        sorted(expected, key=lambda img: img.locator),
    )
    for got, want in paired:
        assert_test_image_equal(got, want)
Oops, something went wrong.