Add add_run command #240

Merged · 6 commits · Aug 30, 2024

42 changes: 42 additions & 0 deletions README.md
@@ -10,6 +10,7 @@ The TestRail CLI currently supports:
- **Uploading automated test results from JUnit reports**
- **Uploading automated test results from Robot Framework reports**
- **Auto-generating test cases from OpenAPI specifications**
- **Creating new test runs to upload results into**

To see further documentation about the TestRail CLI, please refer to the
[TestRail CLI documentation pages](https://support.gurock.com/hc/en-us/articles/7146548750868-TestRail-CLI)
@@ -37,6 +38,7 @@ Supported and loaded modules:
- parse_junit: JUnit XML Files (& Similar)
- parse_robot: Robot Framework XML Files
- parse_openapi: OpenAPI YML Files
- add_run: Create a new empty test run
```

CLI general reference
@@ -73,6 +75,7 @@ Commands:
parse_junit Parse JUnit report and upload results to TestRail
parse_openapi Parse OpenAPI spec and create cases in TestRail
parse_robot Parse Robot Framework report and upload results to TestRail
add_run Create a new test run (useful for CI/CD flows prior to parsing results)
```

Uploading automated test results
@@ -245,6 +248,45 @@ the `--special-parser saucectl` command line option.
Please refer to the [SauceLabs and saucectl reports](https://support.gurock.com/hc/en-us/articles/12719558686484)
documentation for further information.

#### Creating new test runs

When a test run must be created before one of the parse commands is used, use the `add_run` command. For example, if
tests are executed across parallel, independent test nodes, all nodes should report their results into the same test run.
First, use the `add_run` command to create a new run; then pass the run title and ID to each of the test nodes, which
will use them to upload all results into that same test run.
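
Below is a minimal sketch of that first step, assuming placeholder host, project, credential, and file names (substitute your own):

```shell
# Create the run once (e.g. on a CI coordinator node) and write its
# title and id to a file that can be shared with the test nodes
$ trcli -y -h https://example.testrail.io --project "My Project" \
    --username user@example.com --key <api-key> \
    add_run --title "Nightly regression" -f run_config.yml
```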

### Reference
```shell
$ trcli add_run --help
TestRail CLI v1.9.5
Copyright 2024 Gurock Software GmbH - www.gurock.com
Usage: trcli add_run [OPTIONS]

Options:
--title Title of Test Run to be created or updated in
TestRail.
--suite-id Suite ID to submit results to. [x>=1]
--run-description Summary text to be added to the test run.
--milestone-id Milestone ID to which the Test Run should be
associated to. [x>=1]
--run-assigned-to-id The ID of the user the test run should be assigned
to. [x>=1]
--include-all
--case-ids Comma separated list of test case IDs to include in
the test run.
--run-refs A comma-separated list of references/requirements
-f, --file Write run title and id to file.
--help Show this message and exit.
```

If the `--file` parameter is used, the run title and ID are written to the file in YAML format. Example:
```text
title: Run Title
run_id: 1
```

This file can be used as the config file (or appended to an existing config file) in a later trcli invocation.
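
For example, each parallel test node could reuse the generated file via the CLI's `--config` option so that its results are reported into the run created above. This is a hedged sketch; the host, project, credentials, and file names are placeholders:

```shell
# On each test node: point trcli at the generated file so parse_junit
# uploads its results into the run created by add_run
$ trcli -y -h https://example.testrail.io --project "My Project" \
    --username user@example.com --key <api-key> \
    -c run_config.yml \
    parse_junit -f results.xml
```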

Generating test cases from OpenAPI specs
-----------------

18 changes: 18 additions & 0 deletions tests/test_api_data_provider.py
@@ -82,6 +82,24 @@ def test_post_run(self, post_data_provider):
post_data_provider.add_run("test run") == post_run_bodies
), "Adding run data doesn't match expected body"

@pytest.mark.data_provider
def test_post_run_all_args(self, post_data_provider):
"""Check body for adding run"""
suite_updater = [
{
"suite_id": 123,
}
]
post_data_provider.update_data(suite_data=suite_updater)
assert (
post_data_provider.add_run(
"test run",
assigned_to_id=1,
include_all=True,
refs="SAN-1, SAN-2"
) == post_run_full_body
), "Adding run full data doesn't match expected body"

@pytest.mark.data_provider
def test_post_results_for_cases(self, post_data_provider):
"""Check body for adding results"""
48 changes: 48 additions & 0 deletions tests/test_cmd_add_run.py
@@ -0,0 +1,48 @@
from unittest import mock

from trcli.cli import Environment
from trcli.commands import cmd_add_run


class TestCmdAddRun:
@mock.patch("builtins.open", new_callable=mock.mock_open)
def test_write_run_to_file(self, mock_open_file):
"""The purpose of this test is to check that calling the write_run_to_file method
writes the correct yaml file excluding optional data."""
title = "Test run 1"
run_id = 1
file = "/fake/path/out.yaml"
environment = Environment(cmd="add_run")
environment.title = title
environment.file = file
expected_string = f"run_id: {run_id}\ntitle: {title}\n"

cmd_add_run.write_run_to_file(environment, run_id)
mock_open_file.assert_called_with(file, "a")
mock_open_file.return_value.__enter__().write.assert_called_once_with(expected_string)

@mock.patch("builtins.open", new_callable=mock.mock_open)
def test_write_run_to_file_with_refs_and_description(self, mock_open_file):
"""The purpose of this test is to check that calling the write_run_to_file method
writes the correct yaml file including optional data."""
title = "Test run 1"
run_id = 1
file = "/fake/path/out.yaml"
description = "test description"
refs = "JIRA-100"
case_ids = "1234"
assigned_to_id = 1
environment = Environment(cmd="add_run")
environment.title = title
environment.file = file
environment.run_refs = refs
environment.run_description = description
environment.run_assigned_to_id = assigned_to_id
environment.run_case_ids = case_ids
environment.run_include_all = True
expected_string = (f"run_assigned_to_id: {assigned_to_id}\nrun_case_ids: '{case_ids}'\n"
f"run_description: {description}\nrun_id: {run_id}\n"
f"run_include_all: true\nrun_refs: {refs}\ntitle: {title}\n")
cmd_add_run.write_run_to_file(environment, run_id)
mock_open_file.assert_called_with(file, "a")
mock_open_file.return_value.__enter__().write.assert_called_once_with(expected_string)
12 changes: 11 additions & 1 deletion tests/test_data/api_data_provider_test_data.py
@@ -40,10 +40,20 @@
"name": "test run",
"suite_id": 123,
"case_ids": [60, 4],
"include_all": False,
"milestone_id": None
}

post_run_full_body = {
"description": "logging: True\ndebug: False",
"name": "test run",
"suite_id": 123,
"case_ids": [60, 4],
"milestone_id": None,
"assignedto_id": 1,
"include_all": True,
"refs": "SAN-1, SAN-2"
}

post_results_for_cases_body = [
{
"results": [
3 changes: 2 additions & 1 deletion tests/test_data/cli_test_data.py
@@ -65,6 +65,7 @@
trcli_description = ('Supported and loaded modules:\n'
' - parse_junit: JUnit XML Files (& Similar)\n'
' - parse_robot: Robot Framework XML Files\n'
' - parse_openapi: OpenAPI YML Files\n')
' - parse_openapi: OpenAPI YML Files\n'
' - add_run: Create a new test run\n')

trcli_help_description = "TestRail CLI"
32 changes: 32 additions & 0 deletions tests/test_data/project_based_client_test_data.py
@@ -0,0 +1,32 @@
from trcli.constants import FAULT_MAPPING

TEST_GET_SUITE_ID_PROMPTS_USER_TEST_DATA = [
(True, 10, 1, "Adding missing suites to project Fake project name.", False),
(True, 10, -1, "Adding missing suites to project Fake project name.", True),
(False, -1, -1, FAULT_MAPPING["no_user_agreement"].format(type="suite"), False),
]

TEST_GET_SUITE_ID_PROMPTS_USER_IDS = [
"user agrees",
"user agrees, fail to add suite",
"used does not agree",
]

TEST_GET_SUITE_ID_SINGLE_SUITE_MODE_BASELINES_TEST_DATA = [
(([], "Could not get suites"), -1, -1, "Could not get suites"),
(([10], ""), -1, 1, ""),
(
([10, 11, 12], ""),
-1,
-1,
FAULT_MAPPING["not_unique_suite_id_single_suite_baselines"].format(
project_name="Fake project name"
),
),
]

TEST_GET_SUITE_ID_SINGLE_SUITE_MODE_BASELINES_IDS = [
"get_suite_ids fails",
"get_suite_ids returns one ID",
"get_suite_ids returns more than one ID",
]
28 changes: 0 additions & 28 deletions tests/test_data/results_provider_test_data.py
@@ -16,16 +16,6 @@
"add_results_failed",
"close_run_failed",
]
TEST_GET_SUITE_ID_PROMPTS_USER_TEST_DATA = [
(True, 10, 1, "Adding missing suites to project Fake project name.", False),
(True, 10, -1, "Adding missing suites to project Fake project name.", True),
(False, -1, -1, FAULT_MAPPING["no_user_agreement"].format(type="suite"), False),
]
TEST_GET_SUITE_ID_PROMPTS_USER_IDS = [
"user agrees",
"user agrees, fail to add suite",
"used does not agree",
]
TEST_ADD_MISSING_SECTIONS_PROMPTS_USER_TEST_DATA = [
(
True,
@@ -88,24 +78,6 @@
"user agrees, test cases not added",
"used does not agree",
]
TEST_GET_SUITE_ID_SINGLE_SUITE_MODE_BASELINES_TEST_DATA = [
(([], "Could not get suites"), -1, -1, "Could not get suites"),
(([10], ""), -1, 1, ""),
(
([10, 11, 12], ""),
-1,
-1,
FAULT_MAPPING["not_unique_suite_id_single_suite_baselines"].format(
project_name="Fake project name"
),
),
]

TEST_GET_SUITE_ID_SINGLE_SUITE_MODE_BASELINES_IDS = [
"get_suite_ids fails",
"get_suite_ids returns one ID",
"get_suite_ids returns more than one ID",
]

TEST_REVERT_FUNCTIONS_AND_EXPECTED = [
(