diff --git a/CHANGELOG.md b/CHANGELOG.md index 4b0d679239..b54d0b7373 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,13 +7,35 @@ All notable changes to this project will be documented in this file. ### Fixed - Changed wazuh-agent service start from install test to registration test ([#5762](https://github.com/wazuh/wazuh-qa/pull/5762)) \- (Tests) +- Fix failed to disable wazuh-manager.service ([#5775](https://github.com/wazuh/wazuh-qa/pull/5775)) \- (Tests) + ## [4.9.1] - TBD ### Added +- Added support for macOS 15 (Vagrant) to the Allocation module ([#5743](https://github.com/wazuh/wazuh-qa/pull/5743)) \- (Framework) +- Add Ubuntu 24.04 support to Deployability testing tier 1 ([#5689](https://github.com/wazuh/wazuh-qa/pull/5689)) \- (Tests) - Added support for macOS 14.6 to the Allocation module (Vagrant) ([#5671](https://github.com/wazuh/wazuh-qa/pull/5671)) \- (Framework) +### Changed + +- Update team labels and add 'agent' option ([#5725](https://github.com/wazuh/wazuh-qa/pull/5725)) \- (Framework) +- Change in VD E2E tests to use package feed instead of CTI feed ([#5739](https://github.com/wazuh/wazuh-qa/pull/5739)) \- (Tests) +- Improve VD plots title ([#5740](https://github.com/wazuh/wazuh-qa/pull/5740)) \- (Framework) +- Remove meaningless clean.yaml in DTT1 tests ([#5732](https://github.com/wazuh/wazuh-qa/pull/5732/)) \- (Framework) +- Updated Debian 12 AMIs and Box to 12.7 version ([#5735](https://github.com/wazuh/wazuh-qa/pull/5735)) \- (Framework) + +### Fixed + +- Fix unexpected warnings in E2E vulnerability detection tests ([#5711](https://github.com/wazuh/wazuh-qa/pull/5711)) \- (Framework + Tests) +- Grafana package used for `upgrade_package_nonvulnerable_to_vulnerable` case is vulnerable ([#5719](https://github.com/wazuh/wazuh-qa/pull/5719)) \- (Tests) +- Increase results windows in E2E Vulnerability detection ([#5712](https://github.com/wazuh/wazuh-qa/pull/5712/)) \- (Framework + Tests) + +### Deleted + +- Reverted an xfail behaviour change in 
the API performance test ([#5734](https://github.com/wazuh/wazuh-qa/pull/5734)) \- (Tests) + ## [4.9.0] - TBD ### Added diff --git a/SECURITY.md b/SECURITY.md new file mode 100644 index 0000000000..4c99eec2fc --- /dev/null +++ b/SECURITY.md @@ -0,0 +1,45 @@ +# Wazuh Open Source Project Security Policy + +Version: 2023-06-12 + +## Introduction +This document outlines the Security Policy for Wazuh's open source projects. It emphasizes our commitment to maintain a secure environment for our users and contributors, and reflects our belief in the power of collaboration to identify and resolve security vulnerabilities. + +## Scope +This policy applies to all open source projects developed, maintained, or hosted by Wazuh. + +## Reporting Security Vulnerabilities +If you believe you've discovered a potential security vulnerability in one of our open source projects, we strongly encourage you to report it to us responsibly. + +Please submit your findings as security advisories under the "Security" tab in the relevant GitHub repository. Alternatively, you may send the details of your findings to [security@wazuh.com](mailto:security@wazuh.com). + +## Vulnerability Disclosure Policy +Upon receiving a report of a potential vulnerability, our team will initiate an investigation. If the reported issue is confirmed as a vulnerability, we will take the following steps: + +1. Acknowledgment: We will acknowledge the receipt of your vulnerability report and begin our investigation. +2. Validation: We will validate the issue and work on reproducing it in our environment. +3. Remediation: We will work on a fix and thoroughly test it +4. Release & Disclosure: After 90 days from the discovery of the vulnerability, or as soon as a fix is ready and thoroughly tested (whichever comes first), we will release a security update for the affected project. 
We will also publicly disclose the vulnerability by publishing a CVE (Common Vulnerabilities and Exposures) and acknowledging the discovering party. +5. Exceptions: In order to preserve the security of the Wazuh community at large, we might extend the disclosure period to allow users to patch their deployments. + +This 90-day period allows for end-users to update their systems and minimizes the risk of widespread exploitation of the vulnerability. + +## Automatic Scanning +We leverage GitHub Actions to perform automated scans of our supply chain. These scans assist us in identifying vulnerabilities and outdated dependencies in a proactive and timely manner. + +## Credit +We believe in giving credit where credit is due. If you report a security vulnerability to us, and we determine that it is a valid vulnerability, we will publicly credit you for the discovery when we disclose the vulnerability. If you wish to remain anonymous, please indicate so in your initial report. + +We do appreciate and encourage feedback from our community, but currently we do not have a bounty program. We might start bounty programs in the future. + +## Compliance with this Policy +We consider the discovery and reporting of security vulnerabilities an important public service. We encourage responsible reporting of any vulnerabilities that may be found in our site or applications. + +Furthermore, we will not take legal action against or suspend or terminate access to the site or services of those who discover and report security vulnerabilities in accordance with this policy because of the fact. + +We ask that all users and contributors respect this policy and the security of our community's users by disclosing vulnerabilities to us in accordance with this policy. + +## Changes to this Security Policy +This policy may be revised from time to time. Each version of the policy will be identified at the top of the page by its effective date. 
+ +If you have any questions about this Security Policy, please contact us at [security@wazuh.com](mailto:security@wazuh.com) \ No newline at end of file diff --git a/deployability/modules/allocation/aws/provider.py b/deployability/modules/allocation/aws/provider.py index e7e9a2ee46..9604b0dd6a 100644 --- a/deployability/modules/allocation/aws/provider.py +++ b/deployability/modules/allocation/aws/provider.py @@ -48,7 +48,7 @@ def _create_instance(cls, base_dir: Path, params: CreationPayload, config: AWSCo temp_id = cls._generate_instance_id(cls.provider_name) temp_dir = base_dir / temp_id credentials = AWSCredentials() - teams = ['qa', 'core', 'framework', 'devops', 'frontend', 'operations', 'cloud', 'threat-intel', 'marketing', 'documentation'] + teams = ['qa', 'cppserver', 'pyserver', 'devops', 'dashboard', 'operations', 'cloud', 'threat-intel', 'marketing', 'documentation', 'agent', 'indexer'] platform = str(params.composite_name.split("-")[0]) arch = str(params.composite_name.split("-")[3]) if not config: diff --git a/deployability/modules/allocation/static/specs/os.yml b/deployability/modules/allocation/static/specs/os.yml index 6a351b744f..62a6d051ef 100644 --- a/deployability/modules/allocation/static/specs/os.yml +++ b/deployability/modules/allocation/static/specs/os.yml @@ -35,7 +35,7 @@ vagrant: virtualizer: virtualbox linux-debian-12-amd64: box: debian/bookworm64 - box_version: 12.20231211.1 + box_version: 12.20240905.1 virtualizer: virtualbox # Oracle Linux linux-oracle-7-amd64: @@ -182,6 +182,14 @@ vagrant: box: development/macos-sonoma-1460 box_version: 0.0.0 virtualizer: parallels + macos-sequoia-15-arm64: + box: macos-15 + box_version: 0.0.0 + virtualizer: parallels + macos-sequoia-15-amd64: + box: development/macos-sequoia + box_version: 0.0.0 + virtualizer: parallels macos-ventura-sign-arm64: box: macos-ventura-sign box_version: 0.0.0 @@ -268,11 +276,11 @@ aws: zone: us-east-1 user: admin linux-debian-12-amd64: - ami: ami-055c8118725fe3a84 + ami: 
ami-014124f30c18be425 zone: us-east-1 user: admin linux-debian-12-arm64: - ami: ami-06703877c23c4ddf1 + ami: ami-027a194fc587a2e82 zone: us-east-1 user: admin # Oracle Linux @@ -422,7 +430,7 @@ aws: zone: us-east-1 user: ec2-user macos-ventura-13-arm64: - ami: ami-01aa3973cdaf40134 + ami: ami-01aa3973cdaf40134 zone: us-east-1 user: ec2-user macos-sonoma-14-amd64: diff --git a/deployability/modules/jobflow/templates/agent/Test-agents-complete.yaml b/deployability/modules/jobflow/templates/agent/Test-agents-complete.yaml index 9cc63c812c..e1409fb909 100755 --- a/deployability/modules/jobflow/templates/agent/Test-agents-complete.yaml +++ b/deployability/modules/jobflow/templates/agent/Test-agents-complete.yaml @@ -18,6 +18,8 @@ variables: - linux-debian-11-arm64 - linux-debian-12-amd64 - linux-debian-12-arm64 + - linux-ubuntu-24.04-amd64 + - linux-ubuntu-24.04-arm64 - linux-ubuntu-22.04-amd64 - linux-ubuntu-22.04-arm64 - linux-ubuntu-18.04-amd64 diff --git a/deployability/modules/jobflow/templates/central_components/Test-CC-complete.yaml b/deployability/modules/jobflow/templates/central_components/Test-CC-complete.yaml index 3962df7996..ec4e8a1972 100644 --- a/deployability/modules/jobflow/templates/central_components/Test-CC-complete.yaml +++ b/deployability/modules/jobflow/templates/central_components/Test-CC-complete.yaml @@ -4,6 +4,7 @@ variables: central_components-os: - linux-ubuntu-20.04-amd64 - linux-ubuntu-22.04-amd64 + - linux-ubuntu-24.04-amd64 - linux-amazon-2-amd64 - linux-redhat-7-amd64 - linux-redhat-8-amd64 @@ -71,6 +72,7 @@ tasks: depends-on: - "allocate-central_components-linux-ubuntu-20.04-amd64" - "allocate-central_components-linux-ubuntu-22.04-amd64" + - "allocate-central_components-linux-ubuntu-24.04-amd64" - "allocate-central_components-linux-amazon-2-amd64" - "allocate-central_components-linux-redhat-7-amd64" - "allocate-central_components-linux-redhat-8-amd64" diff --git 
a/deployability/modules/jobflow/templates/manager/Test-manager-complete.yaml b/deployability/modules/jobflow/templates/manager/Test-manager-complete.yaml index 5e70c9de65..cbc49c9d96 100644 --- a/deployability/modules/jobflow/templates/manager/Test-manager-complete.yaml +++ b/deployability/modules/jobflow/templates/manager/Test-manager-complete.yaml @@ -4,6 +4,7 @@ variables: manager-os: - linux-ubuntu-20.04-amd64 - linux-ubuntu-22.04-amd64 + - linux-ubuntu-24.04-amd64 - linux-amazon-2-amd64 - linux-redhat-7-amd64 - linux-redhat-8-amd64 @@ -62,14 +63,15 @@ tasks: - wazuh-1: "{working-dir}/manager-linux-centos-7-amd64/inventory.yaml" - wazuh-2: "{working-dir}/manager-linux-ubuntu-20.04-amd64/inventory.yaml" - wazuh-3: "{working-dir}/manager-linux-ubuntu-22.04-amd64/inventory.yaml" - - wazuh-4: "{working-dir}/manager-linux-redhat-7-amd64/inventory.yaml" - - wazuh-5: "{working-dir}/manager-linux-redhat-8-amd64/inventory.yaml" - - wazuh-6: "{working-dir}/manager-linux-redhat-9-amd64/inventory.yaml" - - wazuh-7: "{working-dir}/manager-linux-centos-8-amd64/inventory.yaml" - - wazuh-8: "{working-dir}/manager-linux-debian-10-amd64/inventory.yaml" - - wazuh-9: "{working-dir}/manager-linux-debian-11-amd64/inventory.yaml" - - wazuh-10: "{working-dir}/manager-linux-debian-12-amd64/inventory.yaml" - - wazuh-11: "{working-dir}/manager-linux-amazon-2-amd64/inventory.yaml" + - wazuh-4: "{working-dir}/manager-linux-ubuntu-24.04-amd64/inventory.yaml" + - wazuh-5: "{working-dir}/manager-linux-redhat-7-amd64/inventory.yaml" + - wazuh-6: "{working-dir}/manager-linux-redhat-8-amd64/inventory.yaml" + - wazuh-7: "{working-dir}/manager-linux-redhat-9-amd64/inventory.yaml" + - wazuh-8: "{working-dir}/manager-linux-centos-8-amd64/inventory.yaml" + - wazuh-9: "{working-dir}/manager-linux-debian-10-amd64/inventory.yaml" + - wazuh-10: "{working-dir}/manager-linux-debian-11-amd64/inventory.yaml" + - wazuh-11: "{working-dir}/manager-linux-debian-12-amd64/inventory.yaml" + - wazuh-12: 
"{working-dir}/manager-linux-amazon-2-amd64/inventory.yaml" - tests: "install,restart,stop,uninstall" - component: "manager" - wazuh-version: "" @@ -78,6 +80,7 @@ tasks: depends-on: - "allocate-manager-linux-ubuntu-20.04-amd64" - "allocate-manager-linux-ubuntu-22.04-amd64" + - "allocate-manager-linux-ubuntu-24.04-amd64" - "allocate-manager-linux-amazon-2-amd64" - "allocate-manager-linux-redhat-7-amd64" - "allocate-manager-linux-redhat-8-amd64" diff --git a/deployability/modules/testing/main.py b/deployability/modules/testing/main.py index 2c9988971d..2c7520838b 100755 --- a/deployability/modules/testing/main.py +++ b/deployability/modules/testing/main.py @@ -18,7 +18,6 @@ def parse_arguments(): parser.add_argument("--tests", required=True) parser.add_argument("--component", choices=['manager', 'agent', 'central_components'], required=True) parser.add_argument("--dependencies", action='append', default=[], required=False) - parser.add_argument("--cleanup", required=False, default=True) parser.add_argument("--wazuh-version", required=True) parser.add_argument("--wazuh-revision", required=True) parser.add_argument("--wazuh-branch", required=False) diff --git a/deployability/modules/testing/models.py b/deployability/modules/testing/models.py index 522ce9e07b..d8d6fc0c5e 100644 --- a/deployability/modules/testing/models.py +++ b/deployability/modules/testing/models.py @@ -19,7 +19,6 @@ class InputPayload(ExtraVars): tests: list[str] targets: list[str] dependencies: list[str] | None = None - cleanup: bool = True live: bool = False diff --git a/deployability/modules/testing/playbooks/cleanup.yml b/deployability/modules/testing/playbooks/cleanup.yml deleted file mode 100644 index 880d28c86e..0000000000 --- a/deployability/modules/testing/playbooks/cleanup.yml +++ /dev/null @@ -1,11 +0,0 @@ -# Copyright (C) 2015, Wazuh Inc. -# Created by Wazuh, Inc. . 
-# This program is a free software; you can redistribute it and/or modify it under the terms of GPLv2 - -- hosts: localhost - become: true - tasks: - - name: Clean test directory - file: - path: "{{ working_dir }}" - state: absent diff --git a/deployability/modules/testing/testing.py b/deployability/modules/testing/testing.py index 0522ae4ef6..67aee5632c 100644 --- a/deployability/modules/testing/testing.py +++ b/deployability/modules/testing/testing.py @@ -15,7 +15,6 @@ class Tester: _playbooks_dir = Path(__file__).parent / 'playbooks' _setup_playbook = _playbooks_dir / 'setup.yml' - _cleanup_playbook = _playbooks_dir / 'cleanup.yml' _test_template = _playbooks_dir / 'test.yml' @classmethod @@ -60,14 +59,6 @@ def run(cls, payload: InputPayload) -> None: cls._setup(ansible, extra_vars) cls._run_tests(payload.tests, ansible, extra_vars) - # Clean up if required - if payload.cleanup: - for target_path in payload.targets: - target_value = eval(target_path).values() - target_inventory = Inventory(**Utils.load_from_yaml(str(list(target_value)[0]))) - logger.info("Cleaning up") - cls._cleanup(ansible, extra_vars['working_dir']) - @classmethod def _get_extra_vars(cls, payload: InputPayload) -> ExtraVars: """ @@ -96,13 +87,13 @@ def _run_tests(cls, test_list: list[str], ansible: Ansible, extra_vars: ExtraVar rendering_var = {**extra_vars, 'test': test} template = str(cls._test_template) result = ansible.run_playbook(template, rendering_var) + for event in result.events: - logger.info(f"{event['stdout']}") - if result.stats["failures"]: - for event in result.events: + if result.stats["failures"]: if "fatal" in event['stdout']: - raise Exception(f"Test {test} failed with error") - + raise Exception(f"Test {test} failed with error: {event['stdout']}") + else: + logger.info(f"Test {test} Finished with: {event['stdout']}") @classmethod def _setup(cls, ansible: Ansible, extra_vars: ExtraVars) -> None: @@ -120,16 +111,3 @@ def _setup(cls, ansible: Ansible, extra_vars: ExtraVars) 
-> None: if "fatal" in event['stdout']: raise Exception(f"Setup {template} failed with error: {event['stdout']}") - - @classmethod - def _cleanup(cls, ansible: Ansible, remote_working_dir: str = '/tmp') -> None: - """ - Cleanup the environment after the tests. - - Args: - ansible (Ansible): The Ansible object to run the cleanup. - remote_working_dir (str): The remote working directory. - """ - extra_vars = {'working_dir': remote_working_dir} - playbook = str(cls._cleanup_playbook) - ansible.run_playbook(playbook, extra_vars) diff --git a/deployability/modules/testing/tests/helpers/manager.py b/deployability/modules/testing/tests/helpers/manager.py index f180f07199..fd3294ceab 100644 --- a/deployability/modules/testing/tests/helpers/manager.py +++ b/deployability/modules/testing/tests/helpers/manager.py @@ -89,7 +89,6 @@ def uninstall_manager(inventory_path) -> None: ]) system_commands = [ - "systemctl disable wazuh-manager", "systemctl daemon-reload" ] diff --git a/deps/wazuh_testing/wazuh_testing/end_to_end/check_validators.py b/deps/wazuh_testing/wazuh_testing/end_to_end/check_validators.py index aa33e5761f..4081220665 100644 --- a/deps/wazuh_testing/wazuh_testing/end_to_end/check_validators.py +++ b/deps/wazuh_testing/wazuh_testing/end_to_end/check_validators.py @@ -28,7 +28,7 @@ def compare_expected_found_vulnerabilities(vulnerabilities, expected_vulnerabili for vulnerability in expected_vulns: if vulnerability not in vulnerabilities.get(agent, []): logging.critical(f"Vulnerability not found for {agent}: {vulnerability}") - if agent not in vulnerabilities_not_found: + if agent not in vulnerabilities_not_found.keys(): vulnerabilities_not_found[agent] = [] failed_agents.append(agent) @@ -39,9 +39,10 @@ def compare_expected_found_vulnerabilities(vulnerabilities, expected_vulnerabili for vulnerability in agent_vulnerabilities: if vulnerability not in expected_vulnerabilities.get(agent, []): logging.critical(f"Vulnerability unexpected found for {agent}: 
{vulnerability}") - if agent not in vulnerabilities_unexpected: + if agent not in vulnerabilities_unexpected.keys(): vulnerabilities_unexpected[agent] = [] - failed_agents.append(agent) + if agent not in failed_agents: + failed_agents.append(agent) result = False vulnerabilities_unexpected[agent].append(vulnerability) @@ -158,7 +159,6 @@ def equals_but_not_empty(x, y): empty = lambda x: len(x) == 0 no_errors = lambda x: all( - not any(x[host][level] for level in ["ERROR", "CRITICAL", "WARNING"]) + not any(x[host][level] for level in ['ERROR', 'CRITICAL']) for host in x ) - diff --git a/deps/wazuh_testing/wazuh_testing/end_to_end/indexer_api.py b/deps/wazuh_testing/wazuh_testing/end_to_end/indexer_api.py index 1ca658f186..d13cf54726 100644 --- a/deps/wazuh_testing/wazuh_testing/end_to_end/indexer_api.py +++ b/deps/wazuh_testing/wazuh_testing/end_to_end/indexer_api.py @@ -31,6 +31,7 @@ WAZUH_STATES_VULNERABILITIES_INDEXNAME_TEMPLATE = 'wazuh-states-vulnerabilities-{cluster_name}' +INDEXER_RESULT_WINDOWS_VULN_E2E = 50000 def get_wazuh_states_vulnerabilities_indexname(cluster_name: str = 'wazuh') -> str: @@ -176,3 +177,36 @@ def delete_index(host_manager: HostManager, credentials: dict = {'user': 'admin' requests.delete(url=url, verify=False, auth=requests.auth.HTTPBasicAuth(credentials['user'], credentials['password']), headers=headers) + + +def extend_result_window(host_manager: HostManager, credentials: dict = {'user': 'admin', 'password': 'changeme'}, + index: str = 'wazuh-alerts*', new_max_result_window: int = 100000): + """Extend the max_result_window setting for a Wazuh Indexer index. + + Args: + host_manager: An instance of the HostManager class containing information about hosts. + credentials (Optional): A dictionary containing the Indexer credentials. Defaults to + {'user': 'admin', 'password': 'changeme'}. + index (Optional): The Indexer index name. Defaults to 'wazuh-alerts*'. + new_max_result_window (Optional): The new maximum result window size. 
Defaults to 100,000. + """ + logging.info(f"Extending max_result_window for {index} index to {new_max_result_window}") + + url = f"https://{host_manager.get_master_ip()}:9200/{index}/_settings" + headers = { + 'Content-Type': 'application/json', + } + data = { + "index": { + "max_result_window": new_max_result_window + } + } + + response = requests.put(url=url, json=data, verify=False, + auth=requests.auth.HTTPBasicAuth(credentials['user'], credentials['password']), + headers=headers) + + if response.status_code == 200: + logging.info(f"Successfully updated max_result_window for {index} index.") + else: + logging.error(f"Failed to update max_result_window for {index} index. Response: {response.text}") \ No newline at end of file diff --git a/deps/wazuh_testing/wazuh_testing/end_to_end/logs.py b/deps/wazuh_testing/wazuh_testing/end_to_end/logs.py index 3369e3313e..875e0539ca 100644 --- a/deps/wazuh_testing/wazuh_testing/end_to_end/logs.py +++ b/deps/wazuh_testing/wazuh_testing/end_to_end/logs.py @@ -67,27 +67,31 @@ def get_hosts_logs(host_manager: HostManager, host_group: str = 'all') -> Dict[s def check_errors_in_environment(host_manager: HostManager, greater_than_timestamp: str = '', - expected_errors: List[str] = None) -> dict: - """Check if there are errors in the environment + expected_errors: List[str] = None, + error_levels=None) -> dict: + """Check if there are errors in the environment. Args: host_manager (HostManager): An instance of the HostManager class. greater_than_timestamp (str): Timestamp to filter the logs expected_errors (List): List of expected errors. Default None + error_levels (List): List of the error levels to check. Default None. 
Returns: dict: Errors found in the environment """ - - error_level_to_search = ['ERROR', 'CRITICAL', 'WARNING'] - expected_errors = expected_errors or [] + default_error_levels = ['ERROR', 'WARNING', 'CRITICAL'] + if not expected_errors: + expected_errors = [] + if not error_levels: + error_levels = default_error_levels environment_logs = get_hosts_logs(host_manager) environment_level_logs = {} for host, environment_log in environment_logs.items(): environment_level_logs[host] = {} - for level in error_level_to_search: + for level in error_levels: environment_level_logs[host][level] = [] regex = re.compile(fr'((\d{{4}}\/\d{{2}}\/\d{{2}} \d{{2}}:\d{{2}}:\d{{2}}) (.+): ({level}):(.*))') diff --git a/deps/wazuh_testing/wazuh_testing/end_to_end/remote_operations_handler.py b/deps/wazuh_testing/wazuh_testing/end_to_end/remote_operations_handler.py index 4cab9687f0..618b2ae9a3 100644 --- a/deps/wazuh_testing/wazuh_testing/end_to_end/remote_operations_handler.py +++ b/deps/wazuh_testing/wazuh_testing/end_to_end/remote_operations_handler.py @@ -243,9 +243,9 @@ def get_vulnerability_alerts(host_manager: HostManager, agent_list, packages_dat def get_vulnerabilities_index(host_manager: HostManager, agent_list, packages_data: List[Dict], - greater_than_timestamp: str = "") -> Dict: + greater_than_timestamp: str = "", size=10000) -> Dict: vulnerabilities = get_vulnerabilities_from_states_by_agent(host_manager, agent_list, - greater_than_timestamp=greater_than_timestamp) + greater_than_timestamp=greater_than_timestamp, size=size) package_vulnerabilities = filter_vulnerabilities_by_packages(host_manager, vulnerabilities, packages_data) return package_vulnerabilities diff --git a/deps/wazuh_testing/wazuh_testing/end_to_end/vulnerability_detector.py b/deps/wazuh_testing/wazuh_testing/end_to_end/vulnerability_detector.py index 871b07ffeb..adf303c63d 100644 --- a/deps/wazuh_testing/wazuh_testing/end_to_end/vulnerability_detector.py +++ 
b/deps/wazuh_testing/wazuh_testing/end_to_end/vulnerability_detector.py @@ -275,13 +275,16 @@ def parse_vulnerability_from_state(state): def get_vulnerabilities_from_states_by_agent(host_manager: HostManager, agents: List[str], - greater_than_timestamp: str = None, cluster_name='wazuh') -> dict: + greater_than_timestamp: str = None, cluster_name='wazuh', + size=10000) -> dict: """Get vulnerabilities from the vulnerability state index by agent. Args: host_manager (HostManager): Host manager object. agents (list): List of agents. greater_than_timestamp (str, optional): Greater than timestamp. Defaults to None. + size (int, optional): Maximum number of vulnerabilities to collect. + More information in https://opensearch.org/docs/latest/search-plugins/searching-data/paginate Returns: dict: Dictionary of vulnerabilities by agent. @@ -309,8 +312,8 @@ def get_vulnerabilities_from_states_by_agent(host_manager: HostManager, agents: filter=states_filter, index=index, credentials={'user': indexer_user, - 'password': indexer_password} - )['hits']['hits'] + 'password': indexer_password}, + size=size)['hits']['hits'] except KeyError as e: logging.error(f"No vulnerabilities were obtained for {agent}. 
Exception {str(e)}") diff --git a/deps/wazuh_testing/wazuh_testing/end_to_end/vulnerability_detector_packages/vuln_packages.json b/deps/wazuh_testing/wazuh_testing/end_to_end/vulnerability_detector_packages/vuln_packages.json index 0a63077d66..6d696e874a 100644 --- a/deps/wazuh_testing/wazuh_testing/end_to_end/vulnerability_detector_packages/vuln_packages.json +++ b/deps/wazuh_testing/wazuh_testing/end_to_end/vulnerability_detector_packages/vuln_packages.json @@ -262,6 +262,64 @@ }, "uninstall_name": "grafana*" }, + "grafana-9.3.16": { + "package_name": "grafana", + "package_version": "9.3.16", + "CVE": [], + "urls": { + "ubuntu": { + "amd64": "https://s3.amazonaws.com/ci.wazuh.com/qa/testing_files/end_to_end/vulnerability_detector/ubuntu_vuln_packages/grafana_9.3.16_amd64.deb", + "arm64v8": "https://s3.amazonaws.com/ci.wazuh.com/qa/testing_files/end_to_end/vulnerability_detector/ubuntu_vuln_packages/grafana_9.3.16_arm64.deb" + } + }, + "uninstall_name": "grafana*" + }, + "grafana-9.3.16-1": { + "package_name": "grafana", + "package_version": "9.3.16-1", + "CVE": [], + "urls": { + "centos": { + "amd64": "https://s3.amazonaws.com/ci.wazuh.com/qa/testing_files/end_to_end/vulnerability_detector/centos_vuln_packages/grafana-9.3.16-1.x86_64.rpm", + "arm64v8": "https://s3.amazonaws.com/ci.wazuh.com/qa/testing_files/end_to_end/vulnerability_detector/centos_vuln_packages/grafana-9.3.16-1.aarch64.rpm" + } + }, + "uninstall_name": "grafana*" + }, + "grafana-9.4.1": { + "package_name": "grafana", + "package_version": "9.4.1", + "CVE": [ + "CVE-2023-2801", + "CVE-2023-3128", + "CVE-2023-1387", + "CVE-2023-2183" + ], + "urls": { + "ubuntu": { + "amd64": "https://s3.amazonaws.com/ci.wazuh.com/qa/testing_files/end_to_end/vulnerability_detector/ubuntu_vuln_packages/grafana_9.4.1_amd64.deb", + "arm64v8": "https://s3.amazonaws.com/ci.wazuh.com/qa/testing_files/end_to_end/vulnerability_detector/ubuntu_vuln_packages/grafana_9.4.1_arm64.deb" + } + }, + "uninstall_name": "grafana*" + }, 
+ "grafana-9.4.1-1": { + "package_name": "grafana", + "package_version": "9.4.1-1", + "CVE": [ + "CVE-2023-2801", + "CVE-2023-3128", + "CVE-2023-1387", + "CVE-2023-2183" + ], + "urls": { + "centos": { + "amd64": "https://s3.amazonaws.com/ci.wazuh.com/qa/testing_files/end_to_end/vulnerability_detector/centos_vuln_packages/grafana-9.4.1-1.x86_64.rpm", + "arm64v8": "https://s3.amazonaws.com/ci.wazuh.com/qa/testing_files/end_to_end/vulnerability_detector/centos_vuln_packages/grafana-9.4.1-1.aarch64.rpm" + } + }, + "uninstall_name": "grafana*" + }, "grafana-9.4.17": { "package_name": "grafana", "package_version": "9.4.17", diff --git a/deps/wazuh_testing/wazuh_testing/scripts/data_visualizations.py b/deps/wazuh_testing/wazuh_testing/scripts/data_visualizations.py index 85a9c195c0..56be65b61a 100644 --- a/deps/wazuh_testing/wazuh_testing/scripts/data_visualizations.py +++ b/deps/wazuh_testing/wazuh_testing/scripts/data_visualizations.py @@ -52,6 +52,7 @@ def get_script_arguments(): help=f'Path to Json with Columns to Plot. 
Default {None}.') parser.add_argument('-u', '--unify', dest='unify', action='store_true', help='Unify data of the binary processes with their subprocesses to plot.') + parser.add_argument('-x', help="Title of the generated chart, add extra info here.", type=str, dest='plot_title') return parser.parse_args() @@ -66,7 +67,8 @@ def main(): visualization_options = { 'dataframes_paths': options.csv_list, 'store_path': options.destination, - 'base_name': options.name + 'base_name': options.name, + 'plot_title': options.plot_title } strategy = target diff --git a/deps/wazuh_testing/wazuh_testing/tools/performance/visualization.py b/deps/wazuh_testing/wazuh_testing/tools/performance/visualization.py index 6ae2a0df2f..f566ffb745 100644 --- a/deps/wazuh_testing/wazuh_testing/tools/performance/visualization.py +++ b/deps/wazuh_testing/wazuh_testing/tools/performance/visualization.py @@ -20,19 +20,22 @@ class DataVisualizer(ABC): dataframe (pandas.Dataframe): dataframe containing the info from all the CSVs. store_path (str): path to store the CSV images. Defaults to the temp directory. base_name (str, optional): base name used to store the images. + plot_title (str, optional): Title for the generated plots. """ - def __init__(self, dataframes_paths, store_path=gettempdir(), base_name=None): + def __init__(self, dataframes_paths, store_path=gettempdir(), base_name=None, plot_title=None): """Initializes the DataVisualizer. Args: dataframes_paths (list): List of paths to CSV files. store_path (str, optional): Path to store the CSV images. Defaults to the temp directory. base_name (str, optional): Base name used to store the images. + plot_title (str, optional): Title for the generated plots. 
""" self.dataframes_paths = dataframes_paths self.store_path = store_path self.base_name = base_name + self.plot_title = plot_title self.dataframe = pd.DataFrame() self._load_dataframes() @@ -198,6 +201,7 @@ class BinaryDatavisualizer(DataVisualizer): dataframe (pandas.Dataframe): dataframe containing the info from all the CSVs. store_path (str): path to store the CSV images. Defaults to the temp directory. base_name (str, optional): base name used to store the images. + plot_title (str, optional): Title for the generated plots. binary_metrics_fields_to_plot (list): List of binary metrics fields to plot. binary_metrics_extra_fields (list): List of additional binary metrics fields. binary_metrics_fields (list): Combined list of binary metrics fields. @@ -209,16 +213,17 @@ class BinaryDatavisualizer(DataVisualizer): binary_metrics_extra_fields = ["Daemon", "Version", "PID"] binary_metrics_fields = binary_metrics_fields_to_plot + binary_metrics_extra_fields - def __init__(self, dataframes_paths, store_path=gettempdir(), base_name=None, unify_child_daemon_metrics=False): + def __init__(self, dataframes_paths, store_path=gettempdir(), base_name=None, unify_child_daemon_metrics=False, plot_title=None): """Initialize the BinaryDatavisualizer. Args: dataframes (list): List of dataframes containing binary metrics data. store_path (str, optional): Path to store visualizations. Defaults to system temp directory. base_name (str, optional): Base name for saved visualizations. Defaults to None. + plot_title (str, optional): Title for the generated plots. unify_child_daemon_metrics (bool, optional): Whether to unify child daemon metrics. Defaults to False. 
""" - super().__init__(dataframes_paths, store_path, base_name) + super().__init__(dataframes_paths, store_path, base_name, plot_title) self._validate_dataframe() if unify_child_daemon_metrics: self.dataframe = self.dataframe.reset_index(drop=False) @@ -302,6 +307,7 @@ def plot(self): This method creates and saves plots for each binary metric field. """ + p_title = self.plot_title.replace('<>', ' ') columns_to_plot = self._get_fields_to_plot() for element in columns_to_plot: _, ax = plt.subplots() @@ -311,7 +317,7 @@ def plot(self): self._basic_plot(ax, self.dataframe[self.dataframe.Daemon == daemon][element], label=daemon, color=color) - self._save_custom_plot(ax, element, element) + self._save_custom_plot(ax, element, p_title) class DaemonStatisticsVisualizer(DataVisualizer): @@ -322,6 +328,7 @@ class DaemonStatisticsVisualizer(DataVisualizer): dataframe (pandas.Dataframe): dataframe containing the info from all the CSVs. store_path (str): path to store the CSV images. Defaults to the temp directory. base_name (str, optional): base name used to store the images. + plot_title (str, optional): Title for the generated plots. daemon (str): Name of the daemon for which statistics are visualized. plots_data (dict): Data required for plotting statistics. expected_fields (list): List of expected fields for the daemon statistics. @@ -331,17 +338,18 @@ class DaemonStatisticsVisualizer(DataVisualizer): statistics_plot_data_directory = join(dirname(realpath(__file__)), '..', '..', 'data', 'data_visualizer') statistics_filename_suffix = '_csv_headers.json' - def __init__(self, dataframes_paths, daemon, store_path=gettempdir(), base_name=None): + def __init__(self, dataframes_paths, daemon, store_path=gettempdir(), base_name=None, plot_title=None): """Initialize the DaemonStatisticsVisualizer. Args: dataframes (list): List of dataframes containing daemon statistics data. daemon (str): Name of the daemon for which statistics are visualized. 
store_path (str, optional): Path to store visualizations. Defaults to system temp directory. + plot_title (str, optional): Title for the generated plots. base_name (str, optional): Base name for saved visualizations. Defaults to None. """ self.daemon = daemon - super().__init__(dataframes_paths, store_path, base_name) + super().__init__(dataframes_paths, store_path, base_name, plot_title) self.plots_data = self._load_plot_data() self.expected_fields = [] for graph in self.plots_data.values(): @@ -383,6 +391,7 @@ def plot(self): This method creates and saves plots for each statistic field. """ + p_title = self.plot_title.replace('<>', ' ') for element in self.plots_data.values(): columns = element['columns'] title = element['title'] @@ -391,7 +400,7 @@ def plot(self): _, ax = plt.subplots() for column, color in zip(columns, colors): self._basic_plot(ax, self.dataframe[column], label=column, color=color) - self._save_custom_plot(ax, title, title) + self._save_custom_plot(ax, title, p_title) class LogcollectorStatisticsVisualizer(DaemonStatisticsVisualizer): @@ -402,19 +411,21 @@ class LogcollectorStatisticsVisualizer(DaemonStatisticsVisualizer): dataframe (pandas.Dataframe): dataframe containing the info from all the CSVs. store_path (str): path to store the CSV images. Defaults to the temp directory. base_name (str, optional): base name used to store the images. + plot_title (str, optional): Title for the generated plots. general_fields (list): List of general fields for logcollector statistics. """ general_fields = ['Location', 'Target'] - def __init__(self, dataframes_paths, store_path=gettempdir(), base_name=None): + def __init__(self, dataframes_paths, store_path=gettempdir(), base_name=None, plot_title=None): """Initialize the LogcollectorStatisticsVisualizer. Args: dataframes (list): List of dataframes containing logcollector statistics data. store_path (str, optional): Path to store visualizations. Defaults to system temp directory. 
base_name (str, optional): Base name for saved visualizations. Defaults to None. + plot_title (str, optional): Title for the generated plots. """ - super().__init__(dataframes_paths, 'logcollector', store_path, base_name) + super().__init__(dataframes_paths, 'logcollector', store_path, base_name, plot_title) def _get_expected_fields(self): """Get the list of expected fields for logcollector statistics. @@ -437,6 +448,7 @@ def plot(self): This method creates and saves plots for each logcollector target. """ + p_title = self.plot_title.replace('<>', ' ') for element in self.plots_data.values(): _, ax = plt.subplots() targets = self._get_logcollector_location() @@ -445,7 +457,7 @@ def plot(self): self._basic_plot(ax, self.dataframe[self.dataframe.Location == target][element['columns']], label=target, color=color) - self._save_custom_plot(ax, element['title'], element['title']) + self._save_custom_plot(ax, element['title'], p_title) class ClusterStatisticsVisualizer(DataVisualizer): @@ -456,19 +468,21 @@ class ClusterStatisticsVisualizer(DataVisualizer): dataframe (pandas.Dataframe): dataframe containing the info from all the CSVs. store_path (str): path to store the CSV images. Defaults to the temp directory. base_name (str, optional): base name used to store the images. + plot_title (str, optional): Title for the generated plots. expected_cluster_fields (list): List of expected fields for cluster statistics. """ expected_cluster_fields = ['node_name', 'activity', 'time_spent(s)'] - def __init__(self, dataframes_paths, store_path=gettempdir(), base_name=None): + def __init__(self, dataframes_paths, store_path=gettempdir(), base_name=None, plot_title=None): """Initialize the ClusterStatisticsVisualizer. Args: dataframes_paths (list): List of paths to dataframes containing cluster statistics data. store_path (str, optional): Path to store visualizations. Defaults to system temp directory. + plot_title (str, optional): Title for the generated plots. 
base_name (str, optional): Base name for saved visualizations. Defaults to None. """ - super().__init__(dataframes_paths, store_path, base_name) + super().__init__(dataframes_paths, store_path, base_name, plot_title) self._validate_dataframe() def _get_expected_fields(self) -> list: @@ -485,16 +499,19 @@ def plot(self): This method creates and saves plots for each cluster activity. """ elements = list(self.dataframe['activity'].unique()) - + result = self.plot_title.replace("<>", "_").replace(":", "_").replace(",", "") + start = result.find("Version_") + output = result[start:] for element in elements: _, ax = plt.subplots() + p_title = element.replace(' ', '_').lower() + "-" + output nodes = self.dataframe[self.dataframe.activity == element]['node_name'].unique() current_df = self.dataframe[self.dataframe.activity == element] current_df.reset_index(drop=True, inplace=True) for node, color in zip(nodes, self._color_palette(len(nodes))): self._basic_plot(ax=ax, dataframe=current_df[current_df.node_name == node]['time_spent(s)'], label=node, color=color) - self._save_custom_plot(ax, 'time_spent(s)', element.replace(' ', '_').lower(), disable_x_labels=True, + self._save_custom_plot(ax, 'time_spent(s)', p_title, disable_x_labels=True, statistics=DataVisualizer._get_statistics( current_df['time_spent(s)'], calculate_mean=True, calculate_median=True)) @@ -506,19 +523,21 @@ class IndexerAlerts(DataVisualizer): dataframes_paths (list): paths of the CSVs. dataframe (pandas.Dataframe): dataframe containing the info from all the CSVs. store_path (str): path to store the CSV images. Defaults to the temp directory. + plot_title (str, optional): Title for the generated plots. expected_fields (list): List of expected fields for indexer alerts. 
""" expected_fields = ['Total alerts'] - def __init__(self, dataframes_paths, store_path=gettempdir(), base_name=None): + def __init__(self, dataframes_paths, store_path=gettempdir(), base_name=None, plot_title=None): """Initialize the IndexerAlerts visualizer. Args: dataframes_paths (list): List of paths to dataframes containing indexer alerts data. store_path (str, optional): Path to store visualizations. Defaults to system temp directory. + plot_title (str, optional): Title for the generated plots. base_name (str, optional): Base name for saved visualizations. Defaults to None. """ - super().__init__(dataframes_paths, store_path, base_name) + super().__init__(dataframes_paths, store_path, base_name, plot_title) self._validate_dataframe() def _get_expected_fields(self): @@ -543,23 +562,25 @@ def _plot_agregated_alerts(self): This method creates and saves a plot for the aggregated alerts. """ + p_title = self.plot_title.replace('<>', ' ') _, ax = plt.subplots() self.dataframe['Difference'] = self.dataframe['Total alerts'].diff() self.dataframe['Difference'] = self.dataframe['Difference'] / self._calculate_timestamp_interval() self._basic_plot(ax=ax, dataframe=self.dataframe['Difference'], label='Alerts per timestamp', color=self._color_palette(1)[0]) - self._save_custom_plot(ax, 'Different alerts', 'Difference alerts') + self._save_custom_plot(ax, 'Different alerts', p_title) def _plot_plain_alerts(self): """Plot the total alerts. This method creates and saves a plot for the total alerts. """ + p_title = self.plot_title.replace('<>', ' ') _, ax = plt.subplots() self._basic_plot(ax=ax, dataframe=self.dataframe['Total alerts'], label='Total alerts', color=self._color_palette(1)[0]) - self._save_custom_plot(ax, 'Total alerts', 'Total alerts') + self._save_custom_plot(ax, 'Total alerts', p_title) def plot(self): """Plot the indexer alerts data. @@ -577,19 +598,21 @@ class IndexerVulnerabilities(DataVisualizer): dataframes_paths (list): paths of the CSVs. 
dataframe (pandas.Dataframe): dataframe containing the info from all the CSVs. store_path (str): path to store the CSV images. Defaults to the temp directory. + plot_title (str, optional): Title for the generated plots. expected_fields (list): List of expected fields for indexer vulnerabilities. """ expected_fields = ['Total vulnerabilities'] - def __init__(self, dataframes_paths, store_path=gettempdir(), base_name=None): + def __init__(self, dataframes_paths, store_path=gettempdir(), base_name=None, plot_title=None): """Initialize the IndexerVulnerabilities visualizer. Args: dataframes_paths (list): List of paths to dataframes containing indexer vulnerabilities data. store_path (str, optional): Path to store visualizations. Defaults to system temp directory. + plot_title (str, optional): Title for the generated plots. base_name (str, optional): Base name for saved visualizations. Defaults to None. """ - super().__init__(dataframes_paths, store_path, base_name) + super().__init__(dataframes_paths, store_path, base_name, plot_title) self._validate_dataframe() def _get_expected_fields(self): @@ -605,7 +628,8 @@ def plot(self): This method creates and saves a plot for the total vulnerabilities. 
""" + p_title = self.plot_title.replace('<>', ' ') _, ax = plt.subplots() self._basic_plot(ax=ax, dataframe=self.dataframe['Total vulnerabilities'], label='Indexed Vulnerabilities', color=self._color_palette(1)[0]) - self._save_custom_plot(ax, 'Total Vulnerabilities', 'Total vulnerabilities') + self._save_custom_plot(ax, 'Total Vulnerabilities', p_title) diff --git a/provisioning/roles/wazuh/ansible-wazuh-manager/tasks/installation_from_custom_packages.yml b/provisioning/roles/wazuh/ansible-wazuh-manager/tasks/installation_from_custom_packages.yml index d5f120a24a..20e56a60fa 100644 --- a/provisioning/roles/wazuh/ansible-wazuh-manager/tasks/installation_from_custom_packages.yml +++ b/provisioning/roles/wazuh/ansible-wazuh-manager/tasks/installation_from_custom_packages.yml @@ -6,27 +6,39 @@ state: present when: - wazuh_custom_packages_installation_manager_enabled + register: wazuh_manager_installation + retries: 4 + delay: 30 + until: wazuh_manager_installation is succeeded when: - ansible_os_family|lower == "debian" - block: - - name: Install Wazuh Manager from .rpm packages | yum - yum: - name: "{{ wazuh_custom_packages_installation_manager_rpm_url }}" - state: present - when: - - wazuh_custom_packages_installation_manager_enabled - - not (ansible_distribution|lower == "centos" and ansible_distribution_major_version >= "8") - - not (ansible_distribution|lower == "redhat" and ansible_distribution_major_version >= "8") + - name: Install Wazuh Manager from .rpm packages | yum + yum: + name: "{{ wazuh_custom_packages_installation_manager_rpm_url }}" + state: present + when: + - wazuh_custom_packages_installation_manager_enabled + - not (ansible_distribution|lower == "centos" and ansible_distribution_major_version >= "8") + - not (ansible_distribution|lower == "redhat" and ansible_distribution_major_version >= "8") + register: wazuh_manager_installation_yum + retries: 4 + delay: 30 + until: wazuh_manager_installation_yum is succeeded - - name: Install Wazuh Manager 
from .rpm packages | dnf - dnf: - name: "{{ wazuh_custom_packages_installation_manager_rpm_url }}" - state: present - disable_gpg_check: True - when: - - wazuh_custom_packages_installation_manager_enabled - - (ansible_distribution|lower == "centos" and ansible_distribution_major_version >= "8") or - (ansible_distribution|lower == "redhat" and ansible_distribution_major_version >= "8") + - name: Install Wazuh Manager from .rpm packages | dnf + dnf: + name: "{{ wazuh_custom_packages_installation_manager_rpm_url }}" + state: present + disable_gpg_check: True + when: + - wazuh_custom_packages_installation_manager_enabled + - (ansible_distribution|lower == "centos" and ansible_distribution_major_version >= "8") or + (ansible_distribution|lower == "redhat" and ansible_distribution_major_version >= "8") + register: wazuh_manager_installation_dnf + retries: 4 + delay: 30 + until: wazuh_manager_installation_dnf is succeeded when: - - ansible_os_family|lower == "redhat" \ No newline at end of file + - ansible_os_family|lower == "redhat" diff --git a/tests/end_to_end/test_vulnerability_detector/cases/test_vulnerability.yaml b/tests/end_to_end/test_vulnerability_detector/cases/test_vulnerability.yaml index 2a636b4ce5..8ad2b2e7a2 100644 --- a/tests/end_to_end/test_vulnerability_detector/cases/test_vulnerability.yaml +++ b/tests/end_to_end/test_vulnerability_detector/cases/test_vulnerability.yaml @@ -250,21 +250,21 @@ amd64: luxon-2.5.2 arm64v8: luxon-2.5.2 centos: - amd64: grafana-8.5.27-1 - arm64v8: grafana-8.5.27-1 + amd64: grafana-9.3.16-1 + arm64v8: grafana-9.3.16-1 ubuntu: - amd64: grafana-8.5.27 - arm64v8: grafana-8.5.27 + amd64: grafana-9.3.16 + arm64v8: grafana-9.3.16 body: operation: update_package package: from: centos: - amd64: grafana-8.5.27-1 - arm64v8: grafana-8.5.27-1 + amd64: grafana-9.3.16-1 + arm64v8: grafana-9.3.16-1 ubuntu: - amd64: grafana-8.5.27 - arm64v8: grafana-8.5.27 + amd64: grafana-9.3.16 + arm64v8: grafana-9.3.16 windows: amd64: node-v18.20.2 macos: 
@@ -272,11 +272,11 @@ arm64v8: luxon-2.5.2 to: centos: - amd64: grafana-9.1.1-1 - arm64v8: grafana-9.1.1-1 + amd64: grafana-9.4.1-1 + arm64v8: grafana-9.4.1-1 ubuntu: - amd64: grafana-9.1.1 - arm64v8: grafana-9.1.1 + amd64: grafana-9.4.1 + arm64v8: grafana-9.4.1 windows: amd64: node-v20.5.1 macos: @@ -287,11 +287,11 @@ target_os: ['centos', 'ubuntu', 'macos', 'windows'] package: centos: - amd64: grafana-9.1.1-1 - arm64v8: grafana-9.1.1-1 + amd64: grafana-9.4.1-1 + arm64v8: grafana-9.4.1-1 ubuntu: - amd64: grafana-9.1.1 - arm64v8: grafana-9.1.1 + amd64: grafana-9.4.1 + arm64v8: grafana-9.4.1 windows: amd64: node-v20.5.1 macos: diff --git a/tests/end_to_end/test_vulnerability_detector/configurations/manager.yaml b/tests/end_to_end/test_vulnerability_detector/configurations/manager.yaml index 9f2497a2c7..98ce713dd1 100644 --- a/tests/end_to_end/test_vulnerability_detector/configurations/manager.yaml +++ b/tests/end_to_end/test_vulnerability_detector/configurations/manager.yaml @@ -7,6 +7,8 @@ value: 'yes' - feed-update-interval: value: 10h + - offline-url: + value: 'https://localhost' - section: indexer elements: - enabled: diff --git a/tests/end_to_end/test_vulnerability_detector/conftest.py b/tests/end_to_end/test_vulnerability_detector/conftest.py index 181e641ec6..7467e339e6 100644 --- a/tests/end_to_end/test_vulnerability_detector/conftest.py +++ b/tests/end_to_end/test_vulnerability_detector/conftest.py @@ -52,7 +52,7 @@ def test_example(host_manager): restore_configuration, save_indexer_credentials_into_keystore) from wazuh_testing.end_to_end.indexer_api import ( - get_wazuh_states_vulnerabilities_indexname, delete_index) + get_wazuh_states_vulnerabilities_indexname, delete_index, extend_result_window, INDEXER_RESULT_WINDOWS_VULN_E2E) from wazuh_testing.end_to_end.logs import (get_hosts_alerts, get_hosts_logs, truncate_remote_host_group_files) from wazuh_testing.end_to_end.remote_operations_handler import ( @@ -320,8 +320,11 @@ def setup(preconditions, 
teardown, host_manager) -> Generator[Dict, None, None]: timeout_vulnerabilities_detected = len(agents_to_check) * PACKAGE_VULNERABILITY_SCAN_TIME time.sleep(timeout_syscollector_scan + timeout_vulnerabilities_detected) + vuln_index = get_wazuh_states_vulnerabilities_indexname() + extend_result_window(host_manager, index=vuln_index, new_max_result_window=INDEXER_RESULT_WINDOWS_VULN_E2E) - vulnerabilities = get_vulnerabilities_index(host_manager, agents_to_check, package_data) + vulnerabilities = get_vulnerabilities_index(host_manager, agents_to_check, package_data, + size=INDEXER_RESULT_WINDOWS_VULN_E2E) vulnerabilities_from_alerts = get_vulnerability_alerts(host_manager, agents_to_check, package_data, test_timestamp) diff --git a/tests/end_to_end/test_vulnerability_detector/test_vulnerability_detector.py b/tests/end_to_end/test_vulnerability_detector/test_vulnerability_detector.py index 8b2f73b6f6..9da3e16345 100644 --- a/tests/end_to_end/test_vulnerability_detector/test_vulnerability_detector.py +++ b/tests/end_to_end/test_vulnerability_detector/test_vulnerability_detector.py @@ -70,7 +70,9 @@ get_vulnerabilities_from_states_by_agent) from wazuh_testing.end_to_end.waiters import wait_until_vd_is_updated from wazuh_testing.tools.system import HostManager - +from wazuh_testing.end_to_end.indexer_api import (INDEXER_RESULT_WINDOWS_VULN_E2E, + extend_result_window, + get_wazuh_states_vulnerabilities_indexname) pytestmark = [pytest.mark.e2e, pytest.mark.vulnerability_detector, pytest.mark.tier0] @@ -81,6 +83,8 @@ FIRST_SCAN_TIME = None FIRST_SCAN_VULNERABILITIES_INDEX = {} AGENT_REGISTRATION_TIMEOUT = 15 +TIMEOUT_START_MANAGER = 60 +TESTS_UNEXPECTED_ERRORS_LEVELS = ['ERROR', 'CRITICAL'] VULNERABILITY_DETECTION_E2E_EXPECTED_ERRORS = [ r"Invalid ID \d{3} for the source", @@ -182,6 +186,9 @@ def configure_vulnerability_detection_test_environment( host_manager, vulnerability_detection_previously_enabled ) + # Wait for 1 minute to ensure all managers have fully started + 
time.sleep(TIMEOUT_START_MANAGER) + start_agent_and_wait_until_connected(host_manager) if not vulnerability_detection_previously_enabled: @@ -192,6 +199,11 @@ def configure_vulnerability_detection_test_environment( yield test_timestamp +def max_result_window(host_manager): + vuln_index = get_wazuh_states_vulnerabilities_indexname() + extend_result_window(host_manager, index=vuln_index, new_max_result_window=INDEXER_RESULT_WINDOWS_VULN_E2E) + + @pytest.mark.filterwarnings("ignore::urllib3.exceptions.InsecureRequestWarning") class TestInitialScans: # Checks definition @@ -254,7 +266,7 @@ def test_first_syscollector_scan( configure_vulnerability_detection_test_environment, record_property, clean_environment_logs, - delete_states_vulnerability_index, + delete_states_vulnerability_index ): """ description: Validates the initiation of the first Syscollector scans across all agents in the environment. @@ -326,11 +338,13 @@ def test_first_syscollector_scan( logging.critical("Waiting until agent all agents have been scanned.") time.sleep(TIMEOUT_PER_AGENT_VULNERABILITY_FIRST_SCAN * len(AGENTS_SCANNED_FIRST_SCAN)) + max_result_window(host_manager) logging.critical("Checking vulnerabilities in the index") vuln_by_agent_index = get_vulnerabilities_from_states_by_agent( host_manager, AGENTS_SCANNED_FIRST_SCAN, greater_than_timestamp=FIRST_SCAN_TIME, + size=INDEXER_RESULT_WINDOWS_VULN_E2E ) # Store the vulnerabilities in the global variable to make the comparision in test_consistency_initial_scans @@ -371,7 +385,7 @@ def test_first_syscollector_scan( logging.critical("Checking for errors in the environment") unexpected_errors = check_errors_in_environment( - host_manager, expected_errors=VULNERABILITY_DETECTION_E2E_EXPECTED_ERRORS + host_manager, expected_errors=VULNERABILITY_DETECTION_E2E_EXPECTED_ERRORS, error_levels=TESTS_UNEXPECTED_ERRORS_LEVELS ) test_result.validate_check( @@ -501,10 +515,12 @@ def test_syscollector_second_scan( global FIRST_SCAN_TIME + 
max_result_window(host_manager) + logging.critical("Checking vulnerabilities in the index") vuln_by_agent_index = get_vulnerabilities_from_states_by_agent( - host_manager, agents_to_check_vulns, greater_than_timestamp=FIRST_SCAN_TIME - ) + host_manager, agents_to_check_vulns, greater_than_timestamp=FIRST_SCAN_TIME, + size=INDEXER_RESULT_WINDOWS_VULN_E2E) logging.critical( "Checking that all agents has been scanned and generated vulnerabilities in the index" @@ -522,7 +538,7 @@ def test_syscollector_second_scan( unexpected_errors = check_errors_in_environment( host_manager, expected_errors=VULNERABILITY_DETECTION_E2E_EXPECTED_ERRORS, - greater_than_timestamp=get_timestamp, + greater_than_timestamp=get_timestamp, error_levels=TESTS_UNEXPECTED_ERRORS_LEVELS ) test_result.validate_check( @@ -707,8 +723,10 @@ def test_install_vulnerable_package_when_agent_down(self, host_manager, request, time.sleep(VD_E2E_TIMEOUT_SYSCOLLECTOR_SCAN + PACKAGE_VULNERABILITY_SCAN_TIME * len(AGENTS_SCANNED_FIRST_SCAN)) package_data = [body["package"]] + max_result_window(host_manager) - vulnerabilities = get_vulnerabilities_index(host_manager, AGENTS_SCANNED_FIRST_SCAN, package_data) + vulnerabilities = get_vulnerabilities_index(host_manager, AGENTS_SCANNED_FIRST_SCAN, package_data, + size=INDEXER_RESULT_WINDOWS_VULN_E2E) expected_vulnerabilities = get_expected_index(host_manager, AGENTS_SCANNED_FIRST_SCAN, body["operation"], body["package"]) duplicated_vulnerabilities = get_duplicated_vulnerabilities(vulnerabilities) @@ -747,7 +765,7 @@ def test_install_vulnerable_package_when_agent_down(self, host_manager, request, errors_environment = check_errors_in_environment( host_manager, expected_errors=VULNERABILITY_DETECTION_E2E_EXPECTED_ERRORS, - greater_than_timestamp=test_timestamp, + greater_than_timestamp=test_timestamp, error_levels=TESTS_UNEXPECTED_ERRORS_LEVELS ) test_result.validate_check("no_errors", [Evidence("error_level_messages", errors_environment)]) @@ -796,8 +814,10 @@ def 
test_change_agent_manager(self, permutate_agents_managers, request, precondi time.sleep(VD_E2E_TIMEOUT_SYSCOLLECTOR_SCAN + PACKAGE_VULNERABILITY_SCAN_TIME * len(AGENTS_SCANNED_FIRST_SCAN)) package_data = [body["package"]] + max_result_window(host_manager) - vulnerabilities = get_vulnerabilities_index(host_manager, AGENTS_SCANNED_FIRST_SCAN, package_data) + vulnerabilities = get_vulnerabilities_index(host_manager, AGENTS_SCANNED_FIRST_SCAN, package_data, + size=INDEXER_RESULT_WINDOWS_VULN_E2E) expected_vulnerabilities = get_expected_index(host_manager, AGENTS_SCANNED_FIRST_SCAN, body["operation"], body["package"]) @@ -872,7 +892,7 @@ def test_change_agent_manager(self, permutate_agents_managers, request, precondi errors_environment = check_errors_in_environment( host_manager, expected_errors=VULNERABILITY_DETECTION_E2E_EXPECTED_ERRORS, - greater_than_timestamp=test_timestamp, + greater_than_timestamp=test_timestamp, error_levels=TESTS_UNEXPECTED_ERRORS_LEVELS ) test_result.validate_check("no_errors", [Evidence("error_level_messages", errors_environment)]) @@ -928,8 +948,10 @@ def test_vulnerability_detector_scans_cases(self, request, preconditions, body, package_data = [body["package"]["to"], body["package"]["from"]] else: package_data = [body["package"]] + max_result_window(host_manager) - vulnerabilities = get_vulnerabilities_index(host_manager, AGENTS_SCANNED_FIRST_SCAN, package_data) + vulnerabilities = get_vulnerabilities_index(host_manager, AGENTS_SCANNED_FIRST_SCAN, package_data, + size=INDEXER_RESULT_WINDOWS_VULN_E2E) expected_vulnerabilities = get_expected_index(host_manager, AGENTS_SCANNED_FIRST_SCAN, body["operation"], body["package"]) duplicated_vulnerabilities = get_duplicated_vulnerabilities(vulnerabilities) @@ -1001,7 +1023,7 @@ def test_vulnerability_detector_scans_cases(self, request, preconditions, body, errors_environment = check_errors_in_environment( host_manager, expected_errors=VULNERABILITY_DETECTION_E2E_EXPECTED_ERRORS, - 
greater_than_timestamp=test_timestamp, + greater_than_timestamp=test_timestamp, error_levels=TESTS_UNEXPECTED_ERRORS_LEVELS ) test_result.validate_check("no_errors", [Evidence("error_level_messages", errors_environment)]) diff --git a/tests/performance/test_api/test_api_endpoints_performance.py b/tests/performance/test_api/test_api_endpoints_performance.py index 4185be1a91..629c596e2d 100755 --- a/tests/performance/test_api/test_api_endpoints_performance.py +++ b/tests/performance/test_api/test_api_endpoints_performance.py @@ -2,7 +2,6 @@ from os.path import join, dirname, realpath from time import sleep -import warnings import pytest import requests from yaml import safe_load @@ -33,6 +32,12 @@ def test_api_endpoints(test_case, set_api_test_environment, api_healthcheck): set_api_test_environment (fixture): Fixture that modifies the API security options. api_healthcheck (fixture): Fixture used to check that the API is ready to respond requests. """ + # Apply xfails + if test_case['endpoint'] in xfailed_items.keys() and \ + test_case['method'] == xfailed_items[test_case['endpoint']]['method']: + pytest.xfail(xfailed_items[test_case['endpoint']]['message']) + + base_url = api_details['base_url'] headers = api_details['auth_headers'] response = None @@ -44,21 +49,6 @@ def test_api_endpoints(test_case, set_api_test_environment, api_healthcheck): assert response.status_code == 200 assert response.json()['error'] == 0 - except AssertionError as e: - # If the assertion fails, and is marked as xfail - if test_case['endpoint'] in xfailed_items.keys() and \ - test_case['method'] == xfailed_items[test_case['endpoint']]['method']: - pytest.xfail(xfailed_items[test_case['endpoint']]['message']) - - raise e - - else: - # If the test does not fail and is marked as xfail, issue a warning - if test_case['endpoint'] in xfailed_items.keys() and \ - test_case['method'] == xfailed_items[test_case['endpoint']]['method']: - warnings.warn(f"Test {test_case['endpoint']} should have failed 
due " - f"to {xfailed_items[test_case['endpoint']]['message']}") - finally: # Add useful information to report as stdout try: @@ -68,6 +58,5 @@ def test_api_endpoints(test_case, set_api_test_environment, api_healthcheck): except KeyError: print('No response available') - # Restart logic as before if test_case['method'] == 'put' and test_case['restart']: sleep(restart_delay) diff --git a/version.json b/version.json index 6d72845124..d3de04566e 100644 --- a/version.json +++ b/version.json @@ -1,4 +1,4 @@ { "version": "4.10.0", - "revision": "41000" + "revision": "41001" }