Skip to content

Commit

Permalink
(app): added concurrency
Browse files Browse the repository at this point in the history
  • Loading branch information
happer64bit committed Aug 6, 2024
1 parent 62c0bdd commit 194ac59
Show file tree
Hide file tree
Showing 4 changed files with 83 additions and 25 deletions.
40 changes: 34 additions & 6 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,8 @@

- **HTTP Methods**: Test GET, POST, PUT, and DELETE requests.
- **Validation**: Check status codes, content types, and body content.
- **Conditions**: Validate if responses contain specific values or if numeric fields meet criteria.
- **Conditions**: Validate if responses contain specific values, match exact values, or if numeric fields meet criteria (less than, greater than, equal to).
- **Concurrency**: Run tests in parallel to improve efficiency.
- **Reporting**: Detailed success and failure messages, with optional verbose output.

## Installation
Expand Down Expand Up @@ -43,6 +44,9 @@ run:
- value: 201
contains:
id: 1
equalTo:
make: Toyota
model: Corolla
lessThan:
price: 30000
greaterThan:
Expand All @@ -55,7 +59,7 @@ run:
### Environment Variables
You can use environment variables in your configuration. For example, use `{{VARIABLE_NAME}}` syntax to reference environment variables.

Set environment variables before running your tests:

Expand All @@ -82,16 +86,41 @@ run:

## Usage

Run tests:
Run tests with the `hyperscript` command:

```bash
hyperscript path/to/config.yaml
```

### Options
### Command-Line Arguments

- `config_file`: Path to the YAML configuration file. Default is `hypertest.yml`.

Example:
```bash
hyperscript path/to/config.yaml
```

- `--skip-error`: Continue with the next test on error.

Example:
```bash
hyperscript path/to/config.yaml --skip-error
```

- `--verbose`: Enable detailed logging for more comprehensive output.

Example:
```bash
hyperscript path/to/config.yaml --verbose
```

- `--concurrency`: Set the number of concurrent tests to run. If not specified, tests will run sequentially.

Example:
```bash
hyperscript path/to/config.yaml --concurrency 5
```

## Contributing

Expand All @@ -104,4 +133,3 @@ MIT License. See the [LICENSE](LICENSE) file.
## Contact

For questions, email [happer64bit@gmail.com](mailto:happer64bit@gmail.com).

3 changes: 2 additions & 1 deletion hyperscript_cli/__main__.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,11 +7,12 @@ def main():
parser.add_argument('config_file', help="Path to the YAML configuration file", default="hypertest.yml")
parser.add_argument('--skip-error', action='store_true', help="Skip error handling and continue", default=False)
parser.add_argument('--verbose', action='store_true', help="Enable detailed logging", default=False)
parser.add_argument('-c', '--concurrency', type=int, help="Number of concurrent threads (default is no concurrency)", default=None)
args = parser.parse_args()

try:
config = parse_config(args.config_file)
runner = Parser(config, args.skip_error, args.verbose)
runner = Parser(config, args.skip_error, args.verbose, args.concurrency)
runner.run_test()
runner.show_summary()
except Exception as e:
Expand Down
63 changes: 46 additions & 17 deletions hyperscript_cli/parser.py
Original file line number Diff line number Diff line change
@@ -1,3 +1,4 @@
import threading
from typing import Dict, Any, Union, List
from colorama import Fore, init
import requests
Expand All @@ -8,12 +9,13 @@
init(autoreset=True)

class Parser:
def __init__(self, config: Dict[str, Any], skip_error: bool, verbose: bool = False) -> None:
def __init__(self, config: Dict[str, Any], skip_error: bool, verbose: bool = False, concurrency: Union[int, None] = None) -> None:
    """Initialize the test runner.

    Args:
        config: Parsed YAML configuration; ``{{VAR}}`` environment-variable
            placeholders are resolved immediately via ``_process_env_vars``.
        skip_error: When True, continue with the remaining tests after a
            test fails instead of aborting.
        verbose: Enable detailed logging output.
        concurrency: Number of concurrent threads to use; ``None`` (the
            default) runs the tests sequentially.
    """
    # Resolve environment-variable placeholders once, up front, so every
    # test sees the same fully-expanded configuration.
    self.config = self._process_env_vars(config)
    self.skip_error = skip_error
    self.verbose = verbose
    # Pass/fail tallies for the summary report.
    # NOTE(review): when concurrency is enabled these are incremented from
    # worker threads; ``+=`` is not atomic, so counts can race — consider
    # guarding them with a threading.Lock.
    self.success_count = 0
    self.fail_count = 0
    self.concurrency = concurrency

def run_test(self) -> None:
global_url = self.config['global']['url']
Expand All @@ -22,22 +24,37 @@ def run_test(self) -> None:

print(f"\n{Fore.CYAN}Running tests...\n{Fore.RESET}")

for run in self.config['run']:
name = run['name']
path = run['path']
url = global_url + path
method = run.get('method', 'GET').upper()
headers = {**global_headers, **run.get('headers', {})}
cookies = {**global_cookies, **run.get('cookies', {})}
data = run.get('body', {})

try:
response = requests.request(method, url, headers=headers, cookies=cookies, json=data)
self._check_response(response, run['expect'], name)
except requests.RequestException as e:
self._handle_error(f"{str(e)}", name)
except Exception as e:
self._handle_error(f"Unexpected error occurred: {str(e)}", name)
# Run tests concurrently if concurrency is specified
if self.concurrency:
threads = []
for run in self.config['run']:
t = threading.Thread(target=self._run_single_test, args=(run, global_url, global_headers, global_cookies))
threads.append(t)
t.start()

# Wait for all threads to complete
for t in threads:
t.join()
else:
for run in self.config['run']:
self._run_single_test(run, global_url, global_headers, global_cookies)

def _run_single_test(self, run: Dict[str, Any], global_url: str, global_headers: Dict[str, Any], global_cookies: Dict[str, Any]) -> None:
    """Execute one configured test case and record its success or failure.

    Builds the request from the per-test entry merged with the global
    settings (per-test headers/cookies win on key collision), issues it,
    and routes the outcome to ``_check_response`` or ``_handle_error``.
    """
    test_name = run['name']
    target_url = global_url + run['path']
    # HTTP verb defaults to GET; normalize case so 'post' works too.
    http_method = run.get('method', 'GET').upper()

    # Merge globals with per-test overrides (per-test values take priority).
    merged_headers = dict(global_headers)
    merged_headers.update(run.get('headers', {}))
    merged_cookies = dict(global_cookies)
    merged_cookies.update(run.get('cookies', {}))
    payload = run.get('body', {})

    try:
        response = requests.request(http_method, target_url, headers=merged_headers, cookies=merged_cookies, json=payload)
        self._check_response(response, run['expect'], test_name)
    except requests.RequestException as exc:
        # Network-level failures (DNS, refused connection, timeout, ...).
        self._handle_error(f"{str(exc)}", test_name)
    except Exception as exc:
        self._handle_error(f"Unexpected error occurred: {str(exc)}", test_name)

def _check_response(self, response: requests.Response, expect: Dict[str, Any], name: str) -> None:
try:
Expand All @@ -47,6 +64,7 @@ def _check_response(self, response: requests.Response, expect: Dict[str, Any], n
contains = expect.get('contains')
less_than = expect.get('lessThan')
greater_than = expect.get('greaterThan')
equal_to = expect.get('equalTo')

# Check response status code
if isinstance(expected_status, list):
Expand Down Expand Up @@ -84,6 +102,11 @@ def _check_response(self, response: requests.Response, expect: Dict[str, Any], n
self._handle_error(f"RESPONSE VALUES ARE NOT GREATER THAN EXPECTED", name)
return

# Check if response values are equal to expected values
if equal_to and not self._compare_equal_to(response_json, equal_to):
self._handle_error(f"RESPONSE VALUES DO NOT MATCH EXPECTED", name)
return

self._log_success(name)
except Exception as e:
self._handle_error(f"ERROR DURING RESPONSE VALIDATION: {str(e)}", name)
Expand Down Expand Up @@ -114,6 +137,12 @@ def _compare_greater_than(self, actual: Any, greater_than: Dict[str, Any]) -> bo
return False
return True

def _compare_equal_to(self, actual: Any, equal_to: Dict[str, Any]) -> bool:
for key, value in equal_to.items():
if key not in actual or actual[key] != value:
return False
return True

def _handle_error(self, message: str, name: str) -> None:
simplified_message = message
if 'Max retries exceeded' in message:
Expand Down
2 changes: 1 addition & 1 deletion setup.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@

setup(
name='hyperscript-cli',
version='1.0.4',
version='1.0.6',
description='Powerful HTTP Request Tester',
long_description=open('README.md').read(),
long_description_content_type='text/markdown',
Expand Down

0 comments on commit 194ac59

Please sign in to comment.