Skip to content

Commit

Permalink
Add detection integration tests (#10)
Browse files Browse the repository at this point in the history
Add detection integration tests
  • Loading branch information
ly-kolena authored May 11, 2023
1 parent 2000f12 commit 295f79f
Show file tree
Hide file tree
Showing 9 changed files with 1,942 additions and 6 deletions.
59 changes: 53 additions & 6 deletions .circleci/config.yml
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,7 @@ parameters:
default: "3.9"

jobs:
kolena-ci:
unit-test:
parameters:
python-version:
type: string
Expand All @@ -22,13 +22,13 @@ jobs:
steps:
- checkout
- restore_cache:
key: &kolena-ci-cache kolena-ci-cache-<< parameters.python-version >>-{{ checksum "pyproject.toml" }}
key: &ci-base-cache ci-cache-<< parameters.python-version >>-{{ checksum "pyproject.toml" }}
- run: |
poetry config experimental.new-installer false
poetry config installer.max-workers 10
poetry install --no-ansi
- save_cache:
key: *kolena-ci-cache
key: *ci-base-cache
paths:
- /home/circleci/project/.poetry/virtualenvs
- poetry.lock
Expand All @@ -44,12 +44,51 @@ jobs:
name: Run unit tests
command: |
poetry run pytest -vv --cov=kolena --cov-branch tests/unit
- when:
# Generate coverage only from one python version
condition:
equal: [ "3.9", << parameters.python-version >> ]
steps:
- run:
name: Coverage
command: |
poetry run coverage xml --data-file .coverage
- codecov/upload:
file: coverage.xml

integration-test:
parameters:
python-version:
type: string
default: "3.9"
pytest-group:
type: string
default: "generic"
docker:
- image: cimg/python:<< parameters.python-version >>
resource_class: small
environment:
POETRY_CACHE_DIR: /home/circleci/project/.poetry
steps:
- checkout
- restore_cache:
key: ci-cache-<< parameters.python-version >>-{{ checksum "pyproject.toml" }}
- run:
name: Run integration tests
name: Run << parameters.pytest-group >> integration tests
command: |
export KOLENA_TOKEN=${KOLENA_TOKEN}
export KOLENA_CLIENT_BASE_URL=${KOLENA_CLIENT_BASE_URL}
poetry run pytest -vv --cov-append --durations=0 --cov=kolena --cov-branch tests/integration
TEST_GROUP="<< parameters.pytest-group >>"
if [ "$TEST_GROUP" = "misc" ]; then
poetry run pytest -vv --durations=0 --cov=kolena --cov-branch \
--ignore=tests/integration/classification \
--ignore=tests/integration/detection \
--ignore=tests/integration/generic \
--ignore=tests/integration/fr \
tests/integration
else
poetry run pytest -vv --durations=0 --cov=kolena --cov-branch tests/integration/$TEST_GROUP
fi
- when:
# Generate coverage only from one python version
condition:
Expand All @@ -65,7 +104,15 @@ jobs:
workflows:
ci:
jobs:
- kolena-ci:
- unit-test:
name: unit-test-<< matrix.python-version >>
matrix:
parameters:
python-version: [ "3.7", "3.8", "3.9", "3.10" ]
- integration-test:
matrix:
parameters:
python-version: [ "3.9" ]
pytest-group: [ "detection", "fr", "generic", "misc" ]
requires:
- unit-test-<< matrix.python-version >>
146 changes: 146 additions & 0 deletions tests/integration/detection/conftest.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,146 @@
# Copyright 2021-2023 Kolena Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from typing import List

import pytest

from kolena.detection import Model
from kolena.detection import TestCase
from kolena.detection import TestImage
from kolena.detection import TestSuite
from kolena.detection.ground_truth import BoundingBox
from kolena.detection.ground_truth import ClassificationLabel
from kolena.detection.ground_truth import SegmentationMask
from tests.integration.helper import fake_locator
from tests.integration.helper import with_test_prefix


@dataclass(frozen=True)
class TestData:
    """Immutable bundle of pre-registered detection entities shared by the integration tests."""

    # Test cases registered by the detection_test_data fixture, including updated versions.
    test_cases: List[TestCase]
    # Test suites composed from the test cases above, including updated versions.
    test_suites: List[TestSuite]
    # Models available for running tests against the suites.
    models: List[Model]
    # Image locators for every fake image referenced by the test cases.
    locators: List[str]


@pytest.fixture(scope="session")
def detection_test_data() -> TestData:
    """Register a shared set of detection test cases, test suites, and models.

    Session-scoped: each constructor call below registers an entity with the
    backend (comparatively slow), so registration happens once per test session
    and the resulting handles are reused by every test.
    """
    # One instance of each supported ground-truth type (classification label,
    # bounding box, segmentation mask) to exercise every code path.
    ground_truths = [
        ClassificationLabel("car"),
        ClassificationLabel("bike"),
        BoundingBox("boat", top_left=(0.0, 1.5), bottom_right=(0.3, 3.4)),
        SegmentationMask("van", [(4.0, 1.5), (0.9, 3.4), (19.5, 17.6), (8, 8)]),
        BoundingBox("boat", top_left=(50, 60), bottom_right=(60, 100)),
        BoundingBox("pedestrian", top_left=(120, 70), bottom_right=(190, 100)),
        SegmentationMask("truck", [(0, 15), (0.9, 3.4), (19.5, 17.6), (0, 15)]),
        SegmentationMask("airplane", [(4.0, 1.5), (0.9, 3.4), (19.5, 17.6), (8, 8)]),
    ]
    dataset = with_test_prefix("fake-data-set")
    # (locator, metadata) pairs for five fake images under the detection/base prefix.
    images = [(fake_locator(i, "detection/base"), {"example": "metadata", "i": i}) for i in range(5)]

    test_case_a = TestCase(
        with_test_prefix("A"),
        description="filler",
        images=[
            TestImage(locator=images[0][0], dataset=dataset, metadata=images[0][1], ground_truths=[ground_truths[0]]),
            TestImage(locator=images[1][0], dataset=dataset, metadata=images[1][1]),
        ],
    )
    # Same name as test_case_a; reset=True replaces its contents, covering the
    # "update an existing test case" flow (note images[3] is intentionally absent).
    test_case_a_updated = TestCase(
        with_test_prefix("A"),
        description="description",
        images=[
            TestImage(locator=images[0][0], dataset=dataset, metadata=images[0][1], ground_truths=[ground_truths[0]]),
            TestImage(locator=images[1][0], dataset=dataset, metadata=images[1][1]),
            TestImage(locator=images[2][0], dataset=dataset, metadata=images[2][1], ground_truths=[ground_truths[2]]),
            TestImage(locator=images[4][0], dataset=dataset, metadata=images[4][1]),
        ],
        reset=True,
    )
    test_case_b = TestCase(
        with_test_prefix("B"),
        description="fields",
        images=[
            TestImage(
                locator=images[2][0],
                dataset=dataset,
                metadata=images[2][1],
                ground_truths=[ground_truths[1], ground_truths[2]],
            ),
            TestImage(locator=images[3][0], dataset=dataset, metadata=images[3][1], ground_truths=[ground_truths[4]]),
        ],
    )
    # Updated version of test_case_b, again exercising reset=True semantics.
    test_case_b_updated = TestCase(
        with_test_prefix("B"),
        description="etc",
        images=[
            TestImage(locator=images[1][0], dataset=dataset, metadata=images[1][1]),
            TestImage(
                locator=images[2][0],
                dataset=dataset,
                metadata=images[2][1],
                ground_truths=[
                    ground_truths[2],
                    ground_truths[3],
                ],
            ),
            TestImage(
                locator=images[3][0],
                dataset=dataset,
                metadata=images[3][1],
                ground_truths=[
                    ground_truths[5],
                    ground_truths[7],
                ],
            ),
        ],
        reset=True,
    )
    # Single-image case used to test suite composition with a subset of images.
    test_case_b_subset = TestCase(
        with_test_prefix("B_subset"),
        description="and more!",
        images=[
            TestImage(locator=images[3][0], dataset=dataset, metadata=images[3][1], ground_truths=[ground_truths[6]]),
        ],
    )

    test_cases = [test_case_a, test_case_a_updated, test_case_b, test_case_b_updated, test_case_b_subset]

    # Suites mirror the case pattern above: an original, an in-place update via
    # reset=True under the same name, plus independent and subset suites.
    test_suite_name_a = with_test_prefix("A")
    test_suite_a = TestSuite(test_suite_name_a, description="filler", test_cases=[test_case_a, test_case_b])
    test_suite_a_updated = TestSuite(
        test_suite_name_a,
        description="description",
        test_cases=[test_case_a_updated, test_case_b],
        reset=True,
    )
    test_suite_b = TestSuite(with_test_prefix("B"), description="fields", test_cases=[test_case_b_updated])
    test_suite_a_subset = TestSuite(
        with_test_prefix("A_subset"),
        description="etc",
        test_cases=[test_case_b_subset],
    )

    test_suites = [test_suite_a, test_suite_a_updated, test_suite_b, test_suite_a_subset]

    # Two models with differently-shaped metadata to cover metadata round-tripping.
    models = [
        Model(with_test_prefix("a"), metadata={"some": "metadata"}),
        Model(with_test_prefix("b"), metadata={"one": 1, "false": False}),
    ]

    return TestData(test_cases=test_cases, test_suites=test_suites, models=models, locators=[img[0] for img in images])


pytest.register_assert_rewrite("tests.integration.detection.helper")
103 changes: 103 additions & 0 deletions tests/integration/detection/helper.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,103 @@
# Copyright 2021-2023 Kolena Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import random
from typing import List
from typing import Tuple

from kolena.detection import ground_truth
from kolena.detection import inference
from kolena.detection import TestImage

# Pool of class names drawn from when generating fake detection data.
fake_labels = [
    "car",
    "bike",
    "house",
    "airplane",
    "boat",
    "bus",
    "animal",
    "person",
    "cow",
    "cat",
    "dog",
    "parakeet",
    "weasel",
    "rabbit",
    "mouse",
    "rat",
    "anteater",
    "aardvark",
    "whale",
    "seal",
    "walrus",
    "butterfly",
    "hawk",
    "pigeon",
    "goose",
]


def fake_label() -> str:
    """Return a label sampled uniformly at random from ``fake_labels``."""
    index = random.randrange(len(fake_labels))
    return fake_labels[index]


def fake_points(n: int) -> List[Tuple[float, float]]:
    """Return ``n`` random (x, y) points, x in [0, 300] and y in [0, 1], rounded to 3 decimals."""
    points: List[Tuple[float, float]] = []
    for _ in range(n):
        x = round(random.random() * 300, 3)
        y = round(random.random(), 3)
        points.append((x, y))
    return points


def fake_gt_classification_label() -> ground_truth.ClassificationLabel:
    """Build a ground-truth classification label with a random class name."""
    label = fake_label()
    return ground_truth.ClassificationLabel(label)


def fake_gt_bounding_box() -> ground_truth.BoundingBox:
    """Build a ground-truth bounding box with a random label and random corner points."""
    # Keep the label-then-points call order of the original argument evaluation.
    label = fake_label()
    corners = fake_points(2)
    return ground_truth.BoundingBox(label, corners[0], corners[1])


def fake_gt_segmentation_mask() -> ground_truth.SegmentationMask:
    """Build a ground-truth segmentation mask with a random label and 3-15 random vertices."""
    label = fake_label()
    n_vertices = random.randint(3, 15)
    return ground_truth.SegmentationMask(label, fake_points(n_vertices))


def fake_confidence() -> float:
    """Return a pseudo-random confidence score in [0, 1], rounded to 3 decimals."""
    score = random.random()
    return round(score, 3)


def fake_inference_classification_label() -> inference.ClassificationLabel:
    """Build an inference classification label with a random class name and confidence."""
    # Keep the label-then-confidence call order of the original argument evaluation.
    label = fake_label()
    confidence = fake_confidence()
    return inference.ClassificationLabel(label, confidence)


def fake_inference_bounding_box() -> inference.BoundingBox:
    """Build an inference bounding box with a random label, confidence, and corner points."""
    # Preserve original argument evaluation order: label, confidence, then points.
    label = fake_label()
    confidence = fake_confidence()
    corners = fake_points(2)
    return inference.BoundingBox(label, confidence, corners[0], corners[1])


def fake_inference_segmentation_mask() -> inference.SegmentationMask:
    """Build an inference segmentation mask with a random label, confidence, and 3-15 vertices."""
    # Preserve original argument evaluation order: label, confidence, then vertex count.
    label = fake_label()
    confidence = fake_confidence()
    n_vertices = random.randint(3, 15)
    return inference.SegmentationMask(label, confidence, fake_points(n_vertices))


def assert_test_image_equal(a: TestImage, b: TestImage) -> None:
    """Assert two test images are equal, ignoring the ordering of their ground truths."""

    def _gt_key(gt) -> str:
        # Canonical JSON form gives a stable, order-independent sort key.
        return json.dumps(gt._to_dict(), sort_keys=True)

    assert a.locator == b.locator
    assert a.dataset == b.dataset
    assert a.metadata == b.metadata
    assert sorted(a.ground_truths, key=_gt_key) == sorted(b.ground_truths, key=_gt_key)


def assert_test_images_equal(actual: List[TestImage], expected: List[TestImage]) -> None:
    """Assert two lists of test images are equal, ignoring ordering (compared by locator)."""
    assert len(actual) == len(expected)
    actual_sorted = sorted(actual, key=lambda img: img.locator)
    expected_sorted = sorted(expected, key=lambda img: img.locator)
    for got, want in zip(actual_sorted, expected_sorted):
        assert_test_image_equal(got, want)
Loading

0 comments on commit 295f79f

Please sign in to comment.