Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Change the test code to use pytest #124

Merged
merged 32 commits into from
Oct 7, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
32 commits
Select commit Hold shift + click to select a range
aead0e7
add: test file
cjho0316 Sep 21, 2024
cbdd7c4
refactor: add testcode
cjho0316 Sep 27, 2024
9458be2
fix: fix lint and field
cjho0316 Sep 27, 2024
79835ec
fix: fix lint and field in help
cjho0316 Sep 27, 2024
8cd0d10
fix: fix lint and field in common
cjho0316 Sep 27, 2024
65cfbf5
fix: fix lint and field in realcommon
cjho0316 Sep 27, 2024
c3dc62a
fix: fix lint and field in run_compare
cjho0316 Sep 27, 2024
a1b3dcf
fix: fix lint in run_compare, cli, scanner
cjho0316 Sep 28, 2024
2c899d1
fix: fix lint in common
cjho0316 Sep 28, 2024
873d9e3
refactor: alter .ini file pytest logics
cjho0316 Sep 28, 2024
b91372c
refactor: add .ini dependency
cjho0316 Sep 30, 2024
5dd6e76
fix: deal initial CI error
cjho0316 Oct 1, 2024
7d9176a
fix: deal CI error no2
cjho0316 Oct 1, 2024
667395a
fix: deal CI error no3
cjho0316 Oct 1, 2024
b505eee
fix: deal CI error no4
cjho0316 Oct 1, 2024
0b65f69
fix: deal CI error no5
cjho0316 Oct 1, 2024
a9101ab
fix: test__get_input
cjho0316 Oct 4, 2024
8febb0e
fix: test__parse_setting
cjho0316 Oct 4, 2024
c750857
fix: test__run_compare
cjho0316 Oct 4, 2024
7a9e03a
fix: test__run_compare
cjho0316 Oct 4, 2024
0a8c76b
fix: test__common
cjho0316 Oct 4, 2024
9364d54
fix: test__cli
cjho0316 Oct 4, 2024
36e972a
fix: test__parse_setting
cjho0316 Oct 4, 2024
3c87990
add: LG disclaimer
cjho0316 Oct 4, 2024
41c86a9
chore: exclude field
cjho0316 Oct 4, 2024
6640bee
Merge branch 'main' into main
cjho0316 Oct 4, 2024
cc10a96
fix: CI error
cjho0316 Oct 6, 2024
8272bcb
Merge branch 'main' of https://github.com/cjho0316/fosslight_scanner
cjho0316 Oct 6, 2024
6e77924
fix: lint error
cjho0316 Oct 6, 2024
08e6eb2
Update requirements.txt
soimkim Oct 6, 2024
6d99cae
fix: deactivate comment in test_run_dependency
cjho0316 Oct 6, 2024
151e58c
add: add create_scancode, correct_scanner
cjho0316 Oct 6, 2024
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Empty file added tests/__init__.py
Empty file.
3 changes: 0 additions & 3 deletions tests/requirements.txt

This file was deleted.

44 changes: 44 additions & 0 deletions tests/test__get_input.py
cjho0316 marked this conversation as resolved.
Show resolved Hide resolved
Original file line number Diff line number Diff line change
@@ -0,0 +1,44 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2020 LG Electronics Inc.
# SPDX-License-Identifier: Apache-2.0


from fosslight_scanner._get_input import get_input, get_input_mode


def test_get_input(monkeypatch):
    """get_input should fall back to the default on empty input and
    return the typed value otherwise."""
    # given
    ask_msg = "Please enter the path to analyze:"
    default_value = "default"

    # when
    # Empty input -> get_input must return the provided default.
    monkeypatch.setattr('builtins.input', lambda _: "")
    result_no_input = get_input(ask_msg, default_value)

    # Non-empty input -> get_input must return what the user typed.
    # Bug fix: the original passed "user_input" as the default argument,
    # which made the second assertion pass regardless of the mocked input.
    monkeypatch.setattr('builtins.input', lambda _: "user_input")
    result_with_input = get_input(ask_msg, default_value)

    # then
    assert result_no_input == "default"
    assert result_with_input == "user_input"


def test_get_input_mode(monkeypatch):
    """get_input_mode should hand back the URL entered by the user."""
    # given
    executed_path = ""
    mode_list = ["all", "dep"]

    # Stub the interactive prompts: the mode-selection helper answers "1"
    # and every plain input() call yields the analysis URL.
    monkeypatch.setattr('fosslight_scanner._get_input.ask_to_run', lambda _: "1")
    monkeypatch.setattr('builtins.input', lambda _: "https://example.com")

    # when
    _, _, url_to_analyze = get_input_mode(executed_path, mode_list)

    # then
    assert url_to_analyze == "https://example.com"
22 changes: 22 additions & 0 deletions tests/test__help.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,22 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2020 LG Electronics Inc.
# SPDX-License-Identifier: Apache-2.0


import sys
from fosslight_scanner._help import print_help_msg, _HELP_MESSAGE_SCANNER


def test_print_help_msg(capsys, monkeypatch):
    """print_help_msg should emit the scanner help text on stdout."""
    # given
    # Neutralize sys.exit so the test keeps running after the help message.
    # Robustness fix: accept any arguments — sys.exit is commonly called as
    # sys.exit(0)/sys.exit(status), and the original zero-argument lambda
    # would raise TypeError on such a call instead of being a no-op.
    monkeypatch.setattr(sys, "exit", lambda *args, **kwargs: None)

    # when
    print_help_msg()

    # then
    captured = capsys.readouterr()
    # Validate the help message output
    assert _HELP_MESSAGE_SCANNER.strip() in captured.out
35 changes: 35 additions & 0 deletions tests/test__parse_setting.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,35 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2020 LG Electronics Inc.
# SPDX-License-Identifier: Apache-2.0

from fosslight_scanner._parse_setting import parse_setting_json


def test_parse_setting_json_valid_data():
    """A fully populated settings dict is unpacked into the expected tuple."""
    # given: one value for every supported setting key
    settings = {
        'mode': ['test'],
        'path': ['/some/path'],
        'dep_argument': 'arg',
        'output': 'output',
        'format': 'json',
        'link': 'http://example.com',
        'db_url': 'sqlite:///:memory:',
        'timer': True,
        'raw': True,
        'core': 4,
        'no_correction': True,
        'correct_fpath': '/correct/path',
        'ui': True,
        'exclude': ['/exclude/path'],
        'selected_source_scanner': 'scanner',
        'source_write_json_file': True,
        'source_print_matched_text': True,
        'source_time_out': 60,
        'binary_simple': True
    }

    # the tuple order mirrors the function's positional return values
    expected = (
        ['test'], ['/some/path'], 'arg', 'output', 'json', 'http://example.com', 'sqlite:///:memory:', True,
        True, 4, True, '/correct/path', True, ['/exclude/path'], 'scanner', True, True, 60, True
    )

    # when / then
    assert parse_setting_json(settings) == expected
199 changes: 199 additions & 0 deletions tests/test__run_compare.py
cjho0316 marked this conversation as resolved.
Show resolved Hide resolved
Original file line number Diff line number Diff line change
@@ -0,0 +1,199 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2020 LG Electronics Inc.
# SPDX-License-Identifier: Apache-2.0


import pytest
from fosslight_scanner._run_compare import write_result_json_yaml, parse_result_for_table, get_sample_html, \
write_result_html, write_result_xlsx, write_compared_result, get_comparison_result_filename, \
count_compared_result, run_compare, \
ADD, DELETE, CHANGE, XLSX_EXT, HTML_EXT, YAML_EXT, JSON_EXT
import logging
import json
import yaml


@pytest.mark.parametrize("ext, expected_content", [
    # Test for JSON and YAML extensions
    (".json", {"key": "value"}),
    (".yaml", {"key": "value"}),

    # Test for TXT extension (failure)
    (".txt", {"key": "value"}),
])
def test_write_result_json_yaml(tmp_path, ext, expected_content):
    """write_result_json_yaml should persist the compared result to disk."""
    # given
    target = tmp_path / f"result{ext}"

    # when
    ok = write_result_json_yaml(target, expected_content, ext)

    # then
    assert ok is True, f"Failed to write file with extension {ext}"

    # Verify the written content per extension.
    if ext == ".json":
        loaded = json.loads(target.read_text(encoding='utf-8'))
        assert loaded == expected_content, "The content of the JSON file does not match the expected content."
    elif ext == ".yaml":
        loaded = yaml.safe_load(target.read_text(encoding='utf-8'))
        assert loaded == expected_content, "The content of the YAML file does not match the expected content."
    elif ext == ".txt":
        # A text dump can never equal the original dict object.
        text = target.read_text(encoding='utf-8')
        assert text != expected_content, "The content of the TXT file does not match the expected string representation."


def test_parse_result_for_table():
    """An ADD entry should be flattened into the expected table row."""
    # given
    oss_item = {"name": "test", "version": "1.0", "license": ["MIT"]}
    expected_row = [ADD, '', '', 'test(1.0)', 'MIT']

    # when / then
    assert parse_result_for_table(oss_item, ADD) == expected_row


def test_get_sample_html():
    """The bundled sample HTML template must not be empty."""
    sample = get_sample_html()
    assert sample != ''


@pytest.mark.parametrize("compared_result, expected_before, expected_after", [
    # Case with empty add, delete, change
    ({ADD: [], DELETE: [], CHANGE: []}, "before.yaml", "after.yaml"),

    # Case with one entry in add and no deletes or changes
    ({ADD: [{"name": "test", "version": "1.0", "license": ["MIT"]}], DELETE: [], CHANGE: []},
     "before.yaml", "after.yaml")
])
def test_write_result_html(tmp_path, compared_result, expected_before, expected_after):
    """write_result_html should create a non-empty HTML report file."""
    # given
    html_path = tmp_path / "result.html"

    # when
    ok = write_result_html(html_path, compared_result, expected_before, expected_after)

    # then
    assert ok is True, "Failed to write the HTML file."
    assert html_path.exists(), "The HTML file was not created."
    assert html_path.read_text(encoding='utf-8'), "The HTML file is empty."


@pytest.mark.parametrize("compared_result", [
    # Case with empty add, delete, change
    {ADD: [], DELETE: [], CHANGE: []},

    # Case with one entry in add and no deletes or changes
    {ADD: [{"name": "test", "version": "1.0", "license": ["MIT"]}], DELETE: [], CHANGE: []}
])
def test_write_result_xlsx(tmp_path, compared_result):
    """write_result_xlsx should create the XLSX report file."""
    # given
    xlsx_path = tmp_path / "result.xlsx"

    # when
    ok = write_result_xlsx(xlsx_path, compared_result)

    # then
    assert ok is True, "Failed to write the XLSX file."
    assert xlsx_path.exists(), "The XLSX file was not created."


@pytest.mark.parametrize("ext, expected_output", [
    (XLSX_EXT, "xlsx"),
    (HTML_EXT, "html"),
    (JSON_EXT, "json"),
    (YAML_EXT, "yaml"),
])
def test_write_compared_result(tmp_path, ext, expected_output):
    """write_compared_result should report the path(s) of the created file(s)."""
    # given
    base_path = tmp_path / "result"
    empty_result = {ADD: [], DELETE: [], CHANGE: []}

    # when
    ok, result_file = write_compared_result(base_path, empty_result, ext)

    # then
    assert ok is True, f"Failed to write the compared result for extension {ext}"
    if ext == HTML_EXT:
        # HTML output also produces a companion XLSX file, so the reported
        # value is a comma-separated pair of paths.
        expected_result_file = f"{str(base_path) + XLSX_EXT}, {str(base_path)}"
        assert result_file == expected_result_file, "HTML file creation failed."
    elif ext == XLSX_EXT:
        assert str(result_file) == str(base_path), "The XLSX result file path does not match the expected output path."
    elif ext == JSON_EXT:
        assert str(result_file) == str(base_path), "The JSON result file path does not match the expected output path."
    else:
        assert str(result_file) == str(base_path), "The YAML result file path does not match the expected output path."


@pytest.mark.parametrize("path, file_name, ext, time_suffix, expected_output", [
    # Case when file name is provided
    ("/path", "file", XLSX_EXT, "time", "/path/file.xlsx"),

    # Case when file name is empty, with different extensions
    ("/path", "", XLSX_EXT, "time", "/path/fosslight_compare_time.xlsx"),
    ("/path", "", HTML_EXT, "time", "/path/fosslight_compare_time.html"),
    ("/path", "", YAML_EXT, "time", "/path/fosslight_compare_time.yaml"),
    ("/path", "", JSON_EXT, "time", "/path/fosslight_compare_time.json"),
])
def test_get_comparison_result_filename(path, file_name, ext, time_suffix, expected_output):
    """The comparison output filename is derived from path, name, ext and time."""
    # when
    actual = get_comparison_result_filename(path, file_name, ext, time_suffix)

    # then
    assert actual == expected_output, f"Expected {expected_output} but got {actual}"


@pytest.mark.parametrize("compared_result, expected_log", [
    ({ADD: [], DELETE: [], CHANGE: []}, "all oss lists are the same."),
    ({ADD: [{"name": "test"}], DELETE: [], CHANGE: []}, "total 1 oss updated (add: 1, delete: 0, change: 0)")
])
def test_count_compared_result(compared_result, expected_log, caplog):
    """count_compared_result should log a summary line for the comparison."""
    # when: capture INFO-level log records emitted by the function
    with caplog.at_level(logging.INFO):
        count_compared_result(compared_result)

    # then
    assert expected_log in caplog.text


def test_run_compare_different_extension(tmp_path):
    """run_compare should fail when the before/after reports use different formats."""
    # given
    before_f = tmp_path / "before.yaml"
    after_f = tmp_path / "after.xlsx"
    output_path = tmp_path
    output_file = "result"
    file_ext = ".yaml"
    _start_time = "time"
    _output_dir = tmp_path

    # Seed the "before" report with a minimal OSS list.
    before_f.write_text(yaml.dump({
        "oss_list": [
            {"name": "test", "version": "1.0", "license": "MIT"}
        ]
    }))

    # The "after" report is an empty (hence mismatched) xlsx file.
    after_f.write_text("")

    # when
    comparison_result = run_compare(before_f, after_f, output_path, output_file,
                                    file_ext, _start_time, _output_dir)

    # then
    assert comparison_result is False
71 changes: 71 additions & 0 deletions tests/test_cli.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,71 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2020 LG Electronics Inc.
# SPDX-License-Identifier: Apache-2.0

import pytest
import json
import sys
from fosslight_scanner.cli import main, set_args


def test_set_args(monkeypatch):
    """set_args should load every option from the given settings file."""
    # Pretend the settings file exists on disk.
    monkeypatch.setattr("os.path.isfile", lambda x: True)

    # Serialized settings that the mocked open() will serve.
    config_json = json.dumps({
        "mode": ["test_mode"],
        "path": ["test_path"],
        "dep_argument": "test_dep_argument",
        "output": "test_output",
        "format": "test_format",
        "link": "test_link",
        "db_url": "test_db_url",
        "timer": True,
        "raw": True,
        "core": 4,
        "no_correction": True,
        "correct_fpath": "test_correct_fpath",
        "ui": True,
        "exclude": ["test_exclude_path"],
        "selected_source_scanner": "test_scanner",
        "source_write_json_file": True,
        "source_print_matched_text": True,
        "source_time_out": 100,
        "binary_simple": True
    })

    def fake_open(*args, **kwargs):
        # Serve the settings JSON regardless of the requested path.
        from io import StringIO
        return StringIO(config_json)

    monkeypatch.setattr("builtins.open", fake_open)

    # when: no CLI values are given, so everything comes from the setting file
    actual = set_args(
        mode=None, path=None, dep_argument=None, output=None, format=None, link=None, db_url=None, timer=None,
        raw=None, core=-1, no_correction=None, correct_fpath=None, ui=None, setting="dummy_path", exclude_path=None
    )

    # then: the tuple mirrors the settings file, in positional order
    assert actual == (
        ["test_mode"], ["test_path"], "test_dep_argument", "test_output", "test_format", "test_link", "test_db_url", True,
        True, 4, True, "test_correct_fpath", True, ["test_exclude_path"], "test_scanner", True, True, 100, True
    )


def test_main_invalid_option(capsys):
    """main() should exit with an error for an unknown CLI option."""
    # given
    sys.argv = ["fosslight_scanner", "--invalid_option"]

    # when: argparse exits the process on unrecognized arguments
    with pytest.raises(SystemExit):
        main()

    # then: the error about the unrecognized argument lands on stderr
    captured = capsys.readouterr()
    assert "unrecognized arguments" in captured.err
Loading
Loading