
Commit 5ca9cf7

Fix tests and imports
Thomsch committed Dec 12, 2023
1 parent cf495f0 commit 5ca9cf7
Showing 9 changed files with 26 additions and 15 deletions.
2 changes: 1 addition & 1 deletion Makefile
@@ -27,7 +27,7 @@ format-python:
black ${PYTHON_FILES}

python-test:
PYTHONPATH="${MAKEFILE_DIR}/src/python/main" pytest src/python/test
pytest src/python/test

shell-test:
bats "${MAKEFILE_DIR}/src/bash/test"
6 changes: 4 additions & 2 deletions README.md
@@ -108,6 +108,10 @@ The detailed pipeline can be visualized in [diagrams/pipeline.drawio.svg](diagrams/pipeline.drawio.svg)
In addition to `data/d4j-5-bugs.csv`, there is one other pre-computed bug file that you can use: `data/d4j-compatible-bugs.csv` contains all the Defects4J bugs that are active and compatible with the experimental infrastructure. Bug projects marked as deprecated by the Defects4J authors are not included in this list of compatible bugs.
To generate a new bug file, see the **Generating the bug file** section.

#### Running Python scripts
Python scripts live in the same directory structure as the Python modules, which makes testing and reusing modules difficult.
The solution is to run Python scripts as modules (`python -m`) so that they can import other modules directly and remain compatible with the tests.
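For example, assuming the package layout implied by this commit's imports and running from the repository root, an invocation might look like the following sketch (script arguments omitted):

```bash
# Running the file directly puts the script's own directory on sys.path,
# so package-style imports such as `src.python.main.analysis` do not resolve:
#   python src/python/main/analysis/print_performance.py
#
# Running it as a module from the repository root lets those imports resolve:
python -m src.python.main.analysis.print_performance
```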

#### Optional steps
Run `./compute_metrics.sh data/d4j-5-bugs.csv $UTB_OUTPUT` to compute the metrics of the D4J bugs. See section [Metrics](#metrics) for more details.

@@ -120,8 +124,6 @@ Do not use `data/d4j-bugs-all.csv` as it contains bugs that are deprecated or no
### Untangling one Defects4J bug
If you only want to evaluate the decomposition of one Defects4J bug, you can follow the pipeline presented in [diagrams/pipeline.drawio.svg](diagrams/pipeline.drawio.svg).

### Aggregating decomposition elapsed time

## Tests
- Run `make check` to run all the checks (tests, linting, etc.) for bash and Python.

10 changes: 10 additions & 0 deletions pyproject.toml
@@ -0,0 +1,10 @@
[build-system]
requires = ["hatchling"]
build-backend = "hatchling.build"

[project]
name = "untanglingevaluation"
version = "1.0"

[tool.pytest.ini_options]
pythonpath = ["src/python/main", "src/python/test"]
7 changes: 3 additions & 4 deletions src/python/main/analysis/print_performance.py
@@ -19,9 +19,8 @@

import pandas as pd

sys.path.insert(1, os.path.join(sys.path[0], '..'))
import latex_utils
import evaluation_results
from src.python.main.analysis import latex_utils
from src.python.main import evaluation_results


def main(d4j_file:str, lltc4j_file:str, aggregator:str, overall:bool):
@@ -82,7 +81,7 @@ def print_performance_commands(df_performance, aggregator_operation):
"""
for dataset in df_performance.index:
for tool in df_performance.columns:
value = df_performance.loc[dataset, tool].round(PRECISION)
value = df_performance.loc[dataset, tool].round(latex_utils.PRECISION)
dataset_name_for_latex = dataset.lower().replace('4', 'f')
tool_name_for_latex = tool.capitalize().replace('-', '')
aggreator_for_latex = aggregator_operation.capitalize()
2 changes: 1 addition & 1 deletion src/python/main/evaluation_results.py
@@ -7,7 +7,7 @@

import pandas as pd

import tangled_metrics
from . import tangled_metrics

from pandas.api.types import CategoricalDtype

2 changes: 1 addition & 1 deletion src/python/main/ground_truth.py
@@ -29,7 +29,7 @@
import pandas as pd
from unidiff import PatchSet, LINE_TYPE_CONTEXT

from diff_metrics import lines_in_patch
from .diff_metrics import lines_in_patch

COL_NAMES = ["file", "source", "target"]

Empty file.
4 changes: 2 additions & 2 deletions src/python/main/tangled_metrics.py
@@ -11,8 +11,8 @@
import pandas as pd
import numpy as np

import metrics
import evaluation_results
from . import metrics
from . import evaluation_results
from pandas.api.types import CategoricalDtype

TANGLED_LEVELS = [
8 changes: 4 additions & 4 deletions src/python/test/analysis/test_print_performance.py
@@ -3,7 +3,7 @@
"""
import pytest

import src.python.main.analysis.print_performance as print_median_performance
import src.python.main.analysis.print_performance as print_performance


@pytest.fixture
@@ -39,7 +39,7 @@ def test_calculate_performance(sample_d4j_scores, sample_lltc4j_scores, capfd):
"""
Tests that the performance metrics are calculated correctly.
"""
print_median_performance.main(sample_d4j_scores, sample_lltc4j_scores, "median", False)
print_performance.main(sample_d4j_scores, sample_lltc4j_scores, "median", False)

captured = capfd.readouterr()

@@ -72,7 +72,7 @@ def test_calculate_performance(sample_d4j_scores, sample_lltc4j_scores, capfd):
"""
Tests that the performance metrics are calculated correctly.
"""
print_median_performance.main(sample_d4j_scores, sample_lltc4j_scores, "mean", False)
print_performance.main(sample_d4j_scores, sample_lltc4j_scores, "mean", False)

captured = capfd.readouterr()

@@ -106,7 +106,7 @@ def test_calculate_performance_overall(sample_d4j_scores, sample_lltc4j_scores,
"""
Tests that the performance metrics are calculated correctly.
"""
print_median_performance.main(sample_d4j_scores, sample_lltc4j_scores, "mean", True)
print_performance.main(sample_d4j_scores, sample_lltc4j_scores, "mean", True)

captured = capfd.readouterr()
