From b515dbcdfd2e193b8668b8cfceedf1034b845e0e Mon Sep 17 00:00:00 2001
From: Leon <82407168+sed-i@users.noreply.github.com>
Date: Sat, 19 Aug 2023 05:54:40 +0300
Subject: [PATCH] Deprecate "web_external_url"; use strip_prefix (#508)

* Use strip_prefix in favor of web_external_url
* Drop k8s service patch
* Disable scenario tests
* fetch-lib
---
 config.yaml                                   |   2 +
 .../grafana_k8s/v0/grafana_dashboard.py       |  12 +-
 .../observability_libs/v0/cert_handler.py     |  13 +-
 .../v1/kubernetes_service_patch.py            | 341 ------------------
 lib/charms/traefik_k8s/v1/ingress_per_unit.py |  19 +-
 src/charm.py                                  |  71 ++--
 tests/integration/test_external_url.py        | 227 ------------
 tests/scenario/conftest.py                    |   8 +-
 tests/unit/test_charm.py                      | 114 +-----
 tests/unit/test_charm_status.py               |   1 -
 tests/unit/test_remote_write.py               |   3 -
 tests/unit/test_web_external_url.py           | 293 ---------------
 tox.ini                                       |   7 +-
 13 files changed, 81 insertions(+), 1030 deletions(-)
 delete mode 100644 lib/charms/observability_libs/v1/kubernetes_service_patch.py
 delete mode 100644 tests/integration/test_external_url.py
 delete mode 100644 tests/unit/test_web_external_url.py

diff --git a/config.yaml b/config.yaml
index 9c718572..c3f7e3cc 100644
--- a/config.yaml
+++ b/config.yaml
@@ -10,6 +10,8 @@ options:
     default: info
   web_external_url:
     description: |
+      DEPRECATED. This config option is no longer used, in favor of "strip_prefix".
+
       The URL under which Prometheus is externally reachable (for example,
       if Prometheus is served via a reverse proxy).
       Used for generating relative and absolute links back to
diff --git a/lib/charms/grafana_k8s/v0/grafana_dashboard.py b/lib/charms/grafana_k8s/v0/grafana_dashboard.py
index c20ab2b1..1d550c94 100644
--- a/lib/charms/grafana_k8s/v0/grafana_dashboard.py
+++ b/lib/charms/grafana_k8s/v0/grafana_dashboard.py
@@ -219,7 +219,7 @@ def __init__(self, *args):
 
 # Increment this PATCH version before using `charmcraft publish-lib` or reset
 # to 0 if you are raising the major API version
-LIBPATCH = 32
+LIBPATCH = 33
 
 logger = logging.getLogger(__name__)
 
@@ -665,14 +665,14 @@ def _template_panels(
                 continue
             if not existing_templates:
                 datasource = panel.get("datasource")
-                if type(datasource) == str:
+                if isinstance(datasource, str):
                     if "loki" in datasource:
                         panel["datasource"] = "${lokids}"
                     elif "grafana" in datasource:
                         continue
                     else:
                         panel["datasource"] = "${prometheusds}"
-                elif type(datasource) == dict:
+                elif isinstance(datasource, dict):
                     # In dashboards exported by Grafana 9, datasource type is dict
                     dstype = datasource.get("type", "")
                     if dstype == "loki":
@@ -686,7 +686,7 @@ def _template_panels(
                     logger.error("Unknown datasource format: skipping")
                     continue
             else:
-                if type(panel["datasource"]) == str:
+                if isinstance(panel["datasource"], str):
                     if panel["datasource"].lower() in replacements.values():
                         # Already a known template variable
                         continue
@@ -701,7 +701,7 @@ def _template_panels(
                     if replacement:
                         used_replacements.append(ds)
                     panel["datasource"] = replacement or panel["datasource"]
-                elif type(panel["datasource"]) == dict:
+                elif isinstance(panel["datasource"], dict):
                     dstype = panel["datasource"].get("type", "")
                     if panel["datasource"].get("uid", "").lower() in replacements.values():
                         # Already a known template variable
@@ -831,7 +831,7 @@ def _modify_panel(panel: dict, topology: dict, transformer: "CosTool") -> dict:
         if "datasource" not in panel.keys():
            continue
-        if type(panel["datasource"]) == str:
+        if isinstance(panel["datasource"], str):
            if panel["datasource"] not in known_datasources:
                continue
            querytype = 
known_datasources[panel["datasource"]] diff --git a/lib/charms/observability_libs/v0/cert_handler.py b/lib/charms/observability_libs/v0/cert_handler.py index 34d4f154..15087be5 100644 --- a/lib/charms/observability_libs/v0/cert_handler.py +++ b/lib/charms/observability_libs/v0/cert_handler.py @@ -62,7 +62,7 @@ LIBID = "b5cd5cd580f3428fa5f59a8876dcbe6a" LIBAPI = 0 -LIBPATCH = 5 +LIBPATCH = 7 class CertChanged(EventBase): @@ -101,16 +101,17 @@ def __init__( peer_relation_name: Must match metadata.yaml. certificates_relation_name: Must match metadata.yaml. cert_subject: Custom subject. Name collisions are under the caller's responsibility. - extra_sans_dns: Any additional DNS names apart from FQDN. + extra_sans_dns: DNS names. If none are given, use FQDN. """ super().__init__(charm, key) self.charm = charm - self.cert_subject = cert_subject or charm.unit.name - self.cert_subject = charm.unit.name if not cert_subject else cert_subject + # We need to sanitize the unit name, otherwise route53 complains: + # "urn:ietf:params:acme:error:malformed" :: Domain name contains an invalid character + self.cert_subject = charm.unit.name.replace("/", "-") if not cert_subject else cert_subject - # Auto-include the fqdn and drop empty/duplicate sans - self.sans_dns = list(set(filter(None, (extra_sans_dns or []) + [socket.getfqdn()]))) + # Use fqdn only if no SANs were given, and drop empty/duplicate SANs + self.sans_dns = list(set(filter(None, (extra_sans_dns or [socket.getfqdn()])))) self.peer_relation_name = peer_relation_name self.certificates_relation_name = certificates_relation_name diff --git a/lib/charms/observability_libs/v1/kubernetes_service_patch.py b/lib/charms/observability_libs/v1/kubernetes_service_patch.py deleted file mode 100644 index 64dd13ce..00000000 --- a/lib/charms/observability_libs/v1/kubernetes_service_patch.py +++ /dev/null @@ -1,341 +0,0 @@ -# Copyright 2021 Canonical Ltd. -# See LICENSE file for licensing details. - -"""# KubernetesServicePatch Library. - -This library is designed to enable developers to more simply patch the Kubernetes Service created -by Juju during the deployment of a sidecar charm. When sidecar charms are deployed, Juju creates a -service named after the application in the namespace (named after the Juju model). This service by -default contains a "placeholder" port, which is 65536/TCP. - -When modifying the default set of resources managed by Juju, one must consider the lifecycle of the -charm. In this case, any modifications to the default service (created during deployment), will be -overwritten during a charm upgrade. - -When initialised, this library binds a handler to the parent charm's `install` and `upgrade_charm` -events which applies the patch to the cluster. This should ensure that the service ports are -correct throughout the charm's life. - -The constructor simply takes a reference to the parent charm, and a list of -[`lightkube`](https://github.com/gtsystem/lightkube) ServicePorts that each define a port for the -service. For information regarding the `lightkube` `ServicePort` model, please visit the -`lightkube` [docs](https://gtsystem.github.io/lightkube-models/1.23/models/core_v1/#serviceport). - -Optionally, a name of the service (in case service name needs to be patched as well), labels, -selectors, and annotations can be provided as keyword arguments. - -## Getting Started - -To get started using the library, you just need to fetch the library using `charmcraft`. 
**Note -that you also need to add `lightkube` and `lightkube-models` to your charm's `requirements.txt`.** - -```shell -cd some-charm -charmcraft fetch-lib charms.observability_libs.v1.kubernetes_service_patch -cat << EOF >> requirements.txt -lightkube -lightkube-models -EOF -``` - -Then, to initialise the library: - -For `ClusterIP` services: - -```python -# ... -from charms.observability_libs.v1.kubernetes_service_patch import KubernetesServicePatch -from lightkube.models.core_v1 import ServicePort - -class SomeCharm(CharmBase): - def __init__(self, *args): - # ... - port = ServicePort(443, name=f"{self.app.name}") - self.service_patcher = KubernetesServicePatch(self, [port]) - # ... -``` - -For `LoadBalancer`/`NodePort` services: - -```python -# ... -from charms.observability_libs.v1.kubernetes_service_patch import KubernetesServicePatch -from lightkube.models.core_v1 import ServicePort - -class SomeCharm(CharmBase): - def __init__(self, *args): - # ... - port = ServicePort(443, name=f"{self.app.name}", targetPort=443, nodePort=30666) - self.service_patcher = KubernetesServicePatch( - self, [port], "LoadBalancer" - ) - # ... -``` - -Port protocols can also be specified. Valid protocols are `"TCP"`, `"UDP"`, and `"SCTP"` - -```python -# ... -from charms.observability_libs.v1.kubernetes_service_patch import KubernetesServicePatch -from lightkube.models.core_v1 import ServicePort - -class SomeCharm(CharmBase): - def __init__(self, *args): - # ... - tcp = ServicePort(443, name=f"{self.app.name}-tcp", protocol="TCP") - udp = ServicePort(443, name=f"{self.app.name}-udp", protocol="UDP") - sctp = ServicePort(443, name=f"{self.app.name}-sctp", protocol="SCTP") - self.service_patcher = KubernetesServicePatch(self, [tcp, udp, sctp]) - # ... -``` - -Bound with custom events by providing `refresh_event` argument: -For example, you would like to have a configurable port in your charm and want to apply -service patch every time charm config is changed. - -```python -from charms.observability_libs.v1.kubernetes_service_patch import KubernetesServicePatch -from lightkube.models.core_v1 import ServicePort - -class SomeCharm(CharmBase): - def __init__(self, *args): - # ... - port = ServicePort(int(self.config["charm-config-port"]), name=f"{self.app.name}") - self.service_patcher = KubernetesServicePatch( - self, - [port], - refresh_event=self.on.config_changed - ) - # ... -``` - -Additionally, you may wish to use mocks in your charm's unit testing to ensure that the library -does not try to make any API calls, or open any files during testing that are unlikely to be -present, and could break your tests. The easiest way to do this is during your test `setUp`: - -```python -# ... - -@patch("charm.KubernetesServicePatch", lambda x, y: None) -def setUp(self, *unused): - self.harness = Harness(SomeCharm) - # ... 
-``` -""" - -import logging -from types import MethodType -from typing import List, Literal, Optional, Union - -from lightkube import ApiError, Client -from lightkube.core import exceptions -from lightkube.models.core_v1 import ServicePort, ServiceSpec -from lightkube.models.meta_v1 import ObjectMeta -from lightkube.resources.core_v1 import Service -from lightkube.types import PatchType -from ops.charm import CharmBase -from ops.framework import BoundEvent, Object - -logger = logging.getLogger(__name__) - -# The unique Charmhub library identifier, never change it -LIBID = "0042f86d0a874435adef581806cddbbb" - -# Increment this major API version when introducing breaking changes -LIBAPI = 1 - -# Increment this PATCH version before using `charmcraft publish-lib` or reset -# to 0 if you are raising the major API version -LIBPATCH = 7 - -ServiceType = Literal["ClusterIP", "LoadBalancer"] - - -class KubernetesServicePatch(Object): - """A utility for patching the Kubernetes service set up by Juju.""" - - def __init__( - self, - charm: CharmBase, - ports: List[ServicePort], - service_name: Optional[str] = None, - service_type: ServiceType = "ClusterIP", - additional_labels: Optional[dict] = None, - additional_selectors: Optional[dict] = None, - additional_annotations: Optional[dict] = None, - *, - refresh_event: Optional[Union[BoundEvent, List[BoundEvent]]] = None, - ): - """Constructor for KubernetesServicePatch. - - Args: - charm: the charm that is instantiating the library. - ports: a list of ServicePorts - service_name: allows setting custom name to the patched service. If none given, - application name will be used. - service_type: desired type of K8s service. Default value is in line with ServiceSpec's - default value. - additional_labels: Labels to be added to the kubernetes service (by default only - "app.kubernetes.io/name" is set to the service name) - additional_selectors: Selectors to be added to the kubernetes service (by default only - "app.kubernetes.io/name" is set to the service name) - additional_annotations: Annotations to be added to the kubernetes service. - refresh_event: an optional bound event or list of bound events which - will be observed to re-apply the patch (e.g. on port change). - The `install` and `upgrade-charm` events would be observed regardless. - """ - super().__init__(charm, "kubernetes-service-patch") - self.charm = charm - self.service_name = service_name if service_name else self._app - self.service = self._service_object( - ports, - service_name, - service_type, - additional_labels, - additional_selectors, - additional_annotations, - ) - - # Make mypy type checking happy that self._patch is a method - assert isinstance(self._patch, MethodType) - # Ensure this patch is applied during the 'install' and 'upgrade-charm' events - self.framework.observe(charm.on.install, self._patch) - self.framework.observe(charm.on.upgrade_charm, self._patch) - self.framework.observe(charm.on.update_status, self._patch) - - # apply user defined events - if refresh_event: - if not isinstance(refresh_event, list): - refresh_event = [refresh_event] - - for evt in refresh_event: - self.framework.observe(evt, self._patch) - - def _service_object( - self, - ports: List[ServicePort], - service_name: Optional[str] = None, - service_type: ServiceType = "ClusterIP", - additional_labels: Optional[dict] = None, - additional_selectors: Optional[dict] = None, - additional_annotations: Optional[dict] = None, - ) -> Service: - """Creates a valid Service representation. 
- - Args: - ports: a list of ServicePorts - service_name: allows setting custom name to the patched service. If none given, - application name will be used. - service_type: desired type of K8s service. Default value is in line with ServiceSpec's - default value. - additional_labels: Labels to be added to the kubernetes service (by default only - "app.kubernetes.io/name" is set to the service name) - additional_selectors: Selectors to be added to the kubernetes service (by default only - "app.kubernetes.io/name" is set to the service name) - additional_annotations: Annotations to be added to the kubernetes service. - - Returns: - Service: A valid representation of a Kubernetes Service with the correct ports. - """ - if not service_name: - service_name = self._app - labels = {"app.kubernetes.io/name": self._app} - if additional_labels: - labels.update(additional_labels) - selector = {"app.kubernetes.io/name": self._app} - if additional_selectors: - selector.update(additional_selectors) - return Service( - apiVersion="v1", - kind="Service", - metadata=ObjectMeta( - namespace=self._namespace, - name=service_name, - labels=labels, - annotations=additional_annotations, # type: ignore[arg-type] - ), - spec=ServiceSpec( - selector=selector, - ports=ports, - type=service_type, - ), - ) - - def _patch(self, _) -> None: - """Patch the Kubernetes service created by Juju to map the correct port. - - Raises: - PatchFailed: if patching fails due to lack of permissions, or otherwise. - """ - try: - client = Client() - except exceptions.ConfigError as e: - logger.warning("Error creating k8s client: %s", e) - return - - try: - if self._is_patched(client): - return - if self.service_name != self._app: - self._delete_and_create_service(client) - client.patch(Service, self.service_name, self.service, patch_type=PatchType.MERGE) - except ApiError as e: - if e.status.code == 403: - logger.error("Kubernetes service patch failed: `juju trust` this application.") - else: - logger.error("Kubernetes service patch failed: %s", str(e)) - else: - logger.info("Kubernetes service '%s' patched successfully", self._app) - - def _delete_and_create_service(self, client: Client): - service = client.get(Service, self._app, namespace=self._namespace) - service.metadata.name = self.service_name # type: ignore[attr-defined] - service.metadata.resourceVersion = service.metadata.uid = None # type: ignore[attr-defined] # noqa: E501 - client.delete(Service, self._app, namespace=self._namespace) - client.create(service) - - def is_patched(self) -> bool: - """Reports if the service patch has been applied. - - Returns: - bool: A boolean indicating if the service patch has been applied. 
- """ - client = Client() - return self._is_patched(client) - - def _is_patched(self, client: Client) -> bool: - # Get the relevant service from the cluster - try: - service = client.get(Service, name=self.service_name, namespace=self._namespace) - except ApiError as e: - if e.status.code == 404 and self.service_name != self._app: - return False - logger.error("Kubernetes service get failed: %s", str(e)) - raise - - # Construct a list of expected ports, should the patch be applied - expected_ports = [(p.port, p.targetPort) for p in self.service.spec.ports] - # Construct a list in the same manner, using the fetched service - fetched_ports = [ - (p.port, p.targetPort) for p in service.spec.ports # type: ignore[attr-defined] - ] # noqa: E501 - return expected_ports == fetched_ports - - @property - def _app(self) -> str: - """Name of the current Juju application. - - Returns: - str: A string containing the name of the current Juju application. - """ - return self.charm.app.name - - @property - def _namespace(self) -> str: - """The Kubernetes namespace we're running in. - - Returns: - str: A string containing the name of the current Kubernetes namespace. - """ - with open("/var/run/secrets/kubernetes.io/serviceaccount/namespace", "r") as f: - return f.read().strip() diff --git a/lib/charms/traefik_k8s/v1/ingress_per_unit.py b/lib/charms/traefik_k8s/v1/ingress_per_unit.py index 70f137b0..2f430e30 100644 --- a/lib/charms/traefik_k8s/v1/ingress_per_unit.py +++ b/lib/charms/traefik_k8s/v1/ingress_per_unit.py @@ -82,7 +82,7 @@ def _on_ingress_revoked(self, event: IngressPerUnitRevokedForUnitEvent): # Increment this PATCH version before using `charmcraft publish-lib` or reset # to 0 if you are raising the major API version -LIBPATCH = 13 +LIBPATCH = 14 log = logging.getLogger(__name__) @@ -114,6 +114,7 @@ def _on_ingress_revoked(self, event: IngressPerUnitRevokedForUnitEvent): "mode": {"type": "string"}, "strip-prefix": {"type": "string"}, "redirect-https": {"type": "string"}, + "scheme": {"type": "string"}, }, "required": ["model", "name", "host", "port"], } @@ -154,6 +155,7 @@ def _on_ingress_revoked(self, event: IngressPerUnitRevokedForUnitEvent): "mode": Optional[Literal["tcp", "http"]], "strip-prefix": Optional[bool], "redirect-https": Optional[bool], + "scheme": Optional[Literal["http", "https"]], }, total=False, ) @@ -485,7 +487,16 @@ def _get_requirer_unit_data(self, relation: Relation, remote_unit: Unit) -> Requ databag = relation.data[remote_unit] remote_data: Dict[str, Union[int, str]] = {} - for k in ("port", "host", "model", "name", "mode", "strip-prefix", "redirect-https"): + for k in ( + "port", + "host", + "model", + "name", + "mode", + "strip-prefix", + "redirect-https", + "scheme", + ): v = databag.get(k) if v is not None: remote_data[k] = v @@ -663,6 +674,7 @@ def __init__( listen_to: Literal["only-this-unit", "all-units", "both"] = "only-this-unit", strip_prefix: bool = False, redirect_https: bool = False, + scheme: typing.Callable[[], str] = lambda: "http", ): """Constructor for IngressPerUnitRequirer. @@ -692,6 +704,7 @@ def __init__( will be notified *twice* of changes to this unit's ingress!). strip_prefix: remove prefixes from the URL path. redirect_https: redirect incoming requests to HTTPS + scheme: callable returning the scheme to use when constructing the ingress url. 
""" super().__init__(charm, relation_name) self._stored.set_default(current_urls=None) # type: ignore @@ -703,6 +716,7 @@ def __init__( self._mode = mode self._strip_prefix = strip_prefix self._redirect_https = redirect_https + self._get_scheme = scheme self.listen_to = listen_to @@ -790,6 +804,7 @@ def provide_ingress_requirements(self, *, host: Optional[str] = None, port: int) "host": host, "port": str(port), "mode": self._mode, + "scheme": self._get_scheme(), } if self._strip_prefix: diff --git a/src/charm.py b/src/charm.py index fbdef89c..ceef5469 100755 --- a/src/charm.py +++ b/src/charm.py @@ -9,7 +9,7 @@ import re import socket from pathlib import Path -from typing import Dict, List, Optional, cast +from typing import Dict, Optional, cast from urllib.parse import urlparse import yaml @@ -24,10 +24,6 @@ KubernetesComputeResourcesPatch, adjust_resource_requirements, ) -from charms.observability_libs.v1.kubernetes_service_patch import ( - KubernetesServicePatch, - ServicePort, -) from charms.prometheus_k8s.v0.prometheus_scrape import ( MetricsEndpointConsumer, MetricsEndpointProvider, @@ -56,6 +52,7 @@ BlockedStatus, MaintenanceStatus, ModelError, + OpenedPort, WaitingStatus, ) from ops.pebble import Error as PebbleError @@ -103,10 +100,7 @@ def __init__(self, *args): self._port = 9090 self.container = self.unit.get_container(self._name) - self.service_patch = KubernetesServicePatch( - self, - [ServicePort(self._port, name=f"{self.app.name}")], - ) + self.set_ports() self.resources_patch = KubernetesComputeResourcesPatch( self, @@ -118,10 +112,17 @@ def __init__(self, *args): charm=self, key="prometheus-server-cert", peer_relation_name="prometheus-peers", - extra_sans_dns=self.sans(), + extra_sans_dns=[socket.getfqdn()], ) - self.ingress = IngressPerUnitRequirer(self, relation_name="ingress", port=self._port) + self.ingress = IngressPerUnitRequirer( + self, + relation_name="ingress", + port=self._port, + strip_prefix=True, + redirect_https=True, + scheme=lambda: "https" if self._is_tls_enabled() else "http", + ) self._topology = JujuTopology.from_charm(self) @@ -146,9 +147,7 @@ def __init__(self, *args): self.cert_handler.on.cert_changed, ], ) - self._prometheus_client = Prometheus( - f"{external_url.scheme}://localhost:9090/{external_url.path.strip('/')}" - ) + self._prometheus_client = Prometheus(f"{external_url.scheme}://localhost:9090") self.remote_write_provider = PrometheusRemoteWriteProvider( charm=self, @@ -171,7 +170,7 @@ def __init__(self, *args): self.on.leader_elected, self.ingress.on.ready_for_unit, self.ingress.on.revoked_for_unit, - self.on.config_changed, # web_external_url; also covers upgrade-charm + self.on.config_changed, # also covers upgrade-charm self.cert_handler.on.cert_changed, ], item=CatalogueItem( @@ -200,6 +199,22 @@ def __init__(self, *args): self.framework.observe(self.resources_patch.on.patch_failed, self._on_k8s_patch_failed) self.framework.observe(self.on.validate_configuration_action, self._on_validate_config) + def set_ports(self): + """Open necessary (and close no longer needed) workload ports.""" + planned_ports = { + OpenedPort("tcp", self._port), + } + actual_ports = self.unit.opened_ports() + + # Ports may change across an upgrade, so need to sync + ports_to_close = actual_ports.difference(planned_ports) + for p in ports_to_close: + self.unit.close_port(p.protocol, p.port) + + new_ports_to_open = planned_ports.difference(actual_ports) + for p in new_ports_to_open: + self.unit.open_port(p.protocol, p.port) + @property def metrics_path(self): 
"""The metrics path, adjusted by ingress path (if any).""" @@ -299,6 +314,12 @@ def _default_config(self): return config + @property + def internal_url(self) -> str: + """Returns workload's FQDN. Used for ingress.""" + scheme = "https" if self._is_tls_enabled() else "http" + return f"{scheme}://{socket.getfqdn()}:{self._port}" + @property def external_url(self) -> str: """Return the external hostname to be passed to ingress via the relation. @@ -311,26 +332,11 @@ def external_url(self) -> str: routable from the outside, e.g., when deploying on MicroK8s on Linux. """ try: - if web_external_url := self.model.config.get("web_external_url"): - return web_external_url if ingress_url := self.ingress.url: return ingress_url except ModelError as e: logger.error("Failed obtaining external url: %s. Shutting down?", e) - return f"{'https' if self._is_tls_enabled() else 'http'}://{socket.getfqdn()}:{self._port}" - - def sans(self) -> List[str]: - """Return the list of SANs to be listed in a CSR. - - Can't use `self.external_url` because of the circular dependency, but we also don't need - it, because we don't need to have the ingress URL in the SANs, only "our" hostnames. - """ - sans = [socket.getfqdn()] - if web_external_url := self.model.config.get("web_external_url"): - # Make sure the config option is set to a valid URL (e.g. rather than a plain hostname) - if hostname := urlparse(web_external_url).hostname: - sans.append(hostname) - return sans + return self.internal_url def _is_tls_enabled(self): return bool(self.cert_handler.cert) @@ -602,8 +608,7 @@ def _generate_command(self) -> str: if self._web_config(): args.append(f"--web.config.file={WEB_CONFIG_PATH}") - external_url = self.external_url - args.append(f"--web.external-url={external_url}") + args.append(f"--web.external-url={self.internal_url}") if self.model.relations[DEFAULT_REMOTE_WRITE_RELATION_NAME]: args.append("--web.enable-remote-write-receiver") diff --git a/tests/integration/test_external_url.py b/tests/integration/test_external_url.py deleted file mode 100644 index 5ef335b9..00000000 --- a/tests/integration/test_external_url.py +++ /dev/null @@ -1,227 +0,0 @@ -#!/usr/bin/env python3 -# Copyright 2021 Canonical Ltd. -# See LICENSE file for licensing details. - -"""Test various aspects of `external_url`. - -1. When external_url is set (with path prefix) via traefik, default and self-scraping jobs are - 'up'. -2. When external_url is set (with path prefix) via config option to a different value, - default and self-scraping jobs are 'up'. -""" - -import asyncio -import json -import logging -import re -import subprocess -import urllib.request - -import pytest -from helpers import oci_image, unit_address -from pytest_operator.plugin import OpsTest -from workload import Prometheus - -logger = logging.getLogger(__name__) - -prometheus_app_name = "prometheus" -prometheus_resources = {"prometheus-image": oci_image("./metadata.yaml", "prometheus-image")} -external_prom_name = "external-prometheus" - -# Two prometheus units are sufficient to test potential interactions between multi-unit -# deployments and external_url -num_units = 2 - -# The period of time required to be idle before `wait_for_idle` returns is set to 90 sec because -# the default scrape_interval in prometheus is 1m. 
-idle_period = 90 - - -async def test_setup_env(ops_test: OpsTest): - await ops_test.model.set_config( - {"logging-config": "=WARNING; unit=DEBUG", "update-status-hook-interval": "60m"} - ) - - -@pytest.mark.xfail -async def test_deploy(ops_test: OpsTest, prometheus_charm): - await asyncio.gather( - ops_test.model.deploy( - prometheus_charm, - resources=prometheus_resources, - application_name=prometheus_app_name, - num_units=num_units, - trust=True, - ), - ops_test.model.deploy( - prometheus_charm, - resources=prometheus_resources, - application_name=external_prom_name, # to scrape the main prom - trust=True, - ), - ops_test.model.deploy( - "ch:traefik-k8s", - application_name="traefik", - channel="edge", - ), - ) - - await asyncio.gather( - ops_test.model.add_relation( - f"{prometheus_app_name}:self-metrics-endpoint", external_prom_name - ), - ops_test.model.wait_for_idle( - apps=[prometheus_app_name], - status="active", - wait_for_units=num_units, - timeout=300, - ), - ops_test.model.wait_for_idle( - apps=["traefik", external_prom_name], - wait_for_units=1, - timeout=300, - ), - ) - - -async def wait_for_ingress(ops_test: OpsTest): - """Returns when all ingressed prometheuses are ready. - - Wait until ingress is really ready. - Workaround for https://github.com/canonical/traefik-k8s-operator/issues/78. - """ - - async def get_ingressed_endpoints(): - action = ( - await ops_test.model.applications["traefik"] - .units[0] - .run_action("show-proxied-endpoints") - ) - res = (await action.wait()).results - # res looks like this: - # {'proxied-endpoints': - # '{"prometheus/0": {"url": "http://10.128.0.2:80/test-external-url-0lxt-prometheus-0"}, - # "prometheus/1": {"url": "http://10.128.0.2:80/test-external-url-0lxt-prometheus-1"} - # }', 'return-code': 0} - - proxied_endpoints = json.loads(res["proxied-endpoints"]) - endpoints = [v["url"] for v in proxied_endpoints.values()] - return endpoints - - ingressed_endpoints = await get_ingressed_endpoints() - logger.debug("Waiting for endpoints to become reachable: %s", ingressed_endpoints) - await ops_test.model.block_until( - lambda: all(await Prometheus(ep).is_ready() for ep in ingressed_endpoints) - ) - - -async def force_update_status(ops_test: OpsTest): - """Force an update-status emission and wait for active/idle.""" - await ops_test.model.set_config({"update-status-hook-interval": "10s"}) - await asyncio.sleep(11) - await ops_test.model.set_config({"update-status-hook-interval": "60m"}) - logger.debug("At this point, ingressed endpoints should become reachable and reldata updated") - await ops_test.model.wait_for_idle( - apps=[prometheus_app_name, "traefik", external_prom_name], - status="active", - timeout=600, - idle_period=idle_period, - ) - - -@pytest.mark.xfail -async def test_jobs_are_up_via_traefik(ops_test: OpsTest): - # Assuming metallb is already enabled - cmd = [ - "sh", - "-c", - "ip -4 -j route | jq -r '.[] | select(.dst | contains(\"default\")) | .prefsrc'", - ] - result = subprocess.run(cmd, check=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) - ip = result.stdout.decode("utf-8").strip() - - # WHEN prometheus is related to traefik - await ops_test.model.add_relation(f"{prometheus_app_name}:ingress", "traefik") - - # Workaround to make sure everything is up-to-date: update-status - await ops_test.model.set_config({"update-status-hook-interval": "10s"}) - await asyncio.sleep(11) - await ops_test.model.set_config({"update-status-hook-interval": "60m"}) - - logger.info("At this point, after re-enabling metallb, traefik 
should become active") - await ops_test.model.wait_for_idle( - apps=[prometheus_app_name, "traefik", external_prom_name], - status="active", - timeout=600, - idle_period=idle_period, - ) - - # THEN the prometheus API is served on metallb's IP and the model-app-unit path - def prom_url(unit: int) -> str: - return f"http://{ip}/{ops_test.model_name}-{prometheus_app_name}-{unit}" - - # AND the default job is healthy (its scrape url must have the path for this to work) - prom_urls = [prom_url(i) + "/api/v1/targets" for i in range(num_units)] - for url in prom_urls: - logger.info("Attempting to fetch targets from url: %s", url) - targets = urllib.request.urlopen(url, None, timeout=2).read().decode("utf8") - logger.info("Response: %s", targets) - assert '"health":"up"' in targets - assert '"health":"down"' not in targets - - # Workaround to make sure everything is up-to-date: - # Ingress events are already passed as refresh_event to the MetricsEndpointProvider. - # TODO remove these two lines when https://github.com/canonical/traefik-k8s-operator/issues/78 - # is fixed. - await wait_for_ingress(ops_test) - await force_update_status(ops_test) - - # AND the self-scrape jobs are healthy (their scrape url must have the entire web_external_url - # for this to work). - external_prom_url = f"http://{await unit_address(ops_test, external_prom_name, 0)}:9090" - url = external_prom_url + "/api/v1/targets" - logger.info("Attempting to fetch targets from url: %s", external_prom_url) - targets = urllib.request.urlopen(url, None, timeout=2).read().decode("utf8") - logger.info("Response: %s", targets) - - # Make sure the ingressed targets, and not the old ones before ingress applied, are the ones - # being scraped. (Assuming the default scrape interval of 1 min passed since reldata was - # updated with the external url.) 
- for i in range(num_units): - assert f"{ops_test.model_name}-{prometheus_app_name}-{i}" in targets - - assert '"health":"up"' in targets - assert '"health":"down"' not in targets - assert ( - len(re.findall(r'"health":"up"', targets)) == 3 - ) # the default self scrape, and the two prom units - - -@pytest.mark.xfail -async def test_jobs_are_up_with_config_option_overriding_traefik(ops_test: OpsTest): - # GIVEN traefik ingress for prom - # (from previous test) - - # WHEN the `web_external_url` config option is set - await ops_test.model.applications[prometheus_app_name].set_config( - {"web_external_url": "http://foo.bar/baz"}, - ) - - await ops_test.model.wait_for_idle( - apps=[prometheus_app_name], - status="active", - timeout=300, - ) - - # THEN the prometheus api is served on the unit's IP and web_external_url's path - async def prom_url(unit: int) -> str: - return f"http://{await unit_address(ops_test, prometheus_app_name, unit)}:9090/baz" - - # AND the default job is healthy (its scrape url must have the path for this to work) - prom_urls = [await prom_url(i) + "/api/v1/targets" for i in range(num_units)] - for url in prom_urls: - logger.info("Attempting to fetch targets from url: %s", url) - targets = urllib.request.urlopen(url, None, timeout=2).read().decode("utf8") - logger.info("Response: %s", targets) - assert '"health":"up"' in targets - assert '"health":"down"' not in targets diff --git a/tests/scenario/conftest.py b/tests/scenario/conftest.py index 674d0437..636d8db1 100644 --- a/tests/scenario/conftest.py +++ b/tests/scenario/conftest.py @@ -14,16 +14,12 @@ def tautology(*_, **__) -> bool: @pytest.fixture def prometheus_charm(): - with patch("charm.KubernetesServicePatch"), patch( - "lightkube.core.client.GenericSyncClient" - ), patch.multiple( + with patch("lightkube.core.client.GenericSyncClient"), patch.multiple( "charm.KubernetesComputeResourcesPatch", _namespace="test-namespace", _patch=tautology, is_ready=tautology, - ), patch( - "prometheus_client.Prometheus.reload_configuration" - ), patch.multiple( + ), patch("prometheus_client.Prometheus.reload_configuration"), patch.multiple( "charm.PrometheusCharm", _promtool_check_config=lambda *_: ("stdout", ""), _prometheus_version="0.1.0", diff --git a/tests/unit/test_charm.py b/tests/unit/test_charm.py index e915d46f..640ad70a 100644 --- a/tests/unit/test_charm.py +++ b/tests/unit/test_charm.py @@ -30,7 +30,6 @@ @prom_multipatch class TestCharm(unittest.TestCase): - @patch("charm.KubernetesServicePatch", lambda x, y: None) @k8s_resource_multipatch @patch("lightkube.core.client.GenericSyncClient") @prom_multipatch @@ -55,21 +54,6 @@ def test_grafana_is_provided_port_and_source(self): ] self.assertEqual(grafana_host, "http://{}:{}".format(fqdn, "9090")) - @k8s_resource_multipatch - @patch("lightkube.core.client.GenericSyncClient") - def test_web_external_url_is_passed_to_grafana(self, *unused): - self.harness.set_leader(True) - self.harness.update_config({"web_external_url": "http://test:80/foo/bar"}) - - grafana_rel_id = self.harness.add_relation("grafana-source", "grafana") - self.harness.add_relation_unit(grafana_rel_id, "grafana/0") - - grafana_host = self.harness.get_relation_data( - grafana_rel_id, self.harness.model.unit.name - )["grafana_source_host"] - - self.assertEqual(grafana_host, "http://test:80/foo/bar") - def test_default_cli_log_level_is_info(self): plan = self.harness.get_container_pebble_plan("prometheus") self.assertEqual(cli_arg(plan, "--log.level"), "info") @@ -118,26 +102,14 @@ def 
test_ingress_relation_set(self): @k8s_resource_multipatch @patch("lightkube.core.client.GenericSyncClient") - def test_web_external_url_has_precedence_over_ingress_relation(self, *unused): - self.harness.set_leader(True) - - self.harness.update_config({"web_external_url": "http://test:80"}) - - rel_id = self.harness.add_relation("ingress", "traefik-ingress") - self.harness.add_relation_unit(rel_id, "traefik-ingress/0") - - plan = self.harness.get_container_pebble_plan("prometheus") - self.assertEqual(cli_arg(plan, "--web.external-url"), "http://test:80") - - @k8s_resource_multipatch - @patch("lightkube.core.client.GenericSyncClient") - def test_web_external_url_set(self, *unused): + def test_web_external_has_no_effect(self, *unused): self.harness.set_leader(True) - self.harness.update_config({"web_external_url": "http://test:80"}) + self.harness.update_config({"web_external_url": "http://test:80/sub/path"}) plan = self.harness.get_container_pebble_plan("prometheus") - self.assertEqual(cli_arg(plan, "--web.external-url"), "http://test:80") + fqdn = socket.getfqdn() + self.assertEqual(cli_arg(plan, "--web.external-url"), f"http://{fqdn}:9090") def test_metrics_wal_compression_is_not_enabled_by_default(self): plan = self.harness.get_container_pebble_plan("prometheus") @@ -295,7 +267,6 @@ def setUp(self): self.mock_capacity = patcher.start() self.addCleanup(patcher.stop) - @patch("charm.KubernetesServicePatch", lambda x, y: None) @k8s_resource_multipatch @patch("lightkube.core.client.GenericSyncClient") def test_default_maximum_retention_size_is_80_percent(self, *unused): @@ -316,7 +287,6 @@ def test_default_maximum_retention_size_is_80_percent(self, *unused): plan = self.harness.get_container_pebble_plan("prometheus") self.assertEqual(cli_arg(plan, "--storage.tsdb.retention.size"), "0.8GB") - @patch("charm.KubernetesServicePatch", lambda x, y: None) @k8s_resource_multipatch @patch("lightkube.core.client.GenericSyncClient") def test_multiplication_factor_applied_to_pvc_capacity(self, *unused): @@ -405,7 +375,6 @@ class TestAlertsFilename(unittest.TestCase): ] } - @patch("charm.KubernetesServicePatch", lambda x, y: None) @k8s_resource_multipatch @patch("lightkube.core.client.GenericSyncClient") @patch("prometheus_client.Prometheus.reload_configuration", lambda *_: True) @@ -425,7 +394,6 @@ def setUp(self, *unused): self.rel_id = self.harness.add_relation(RELATION_NAME, "remote-app") self.harness.add_relation_unit(self.rel_id, "remote-app/0") - @patch("charm.KubernetesServicePatch", lambda x, y: None) @k8s_resource_multipatch @patch("lightkube.core.client.GenericSyncClient") @patch("prometheus_client.Prometheus.reload_configuration", lambda *_: True) @@ -448,7 +416,6 @@ def test_charm_writes_meaningful_alerts_filename_1(self, *_): {"/etc/prometheus/rules/juju_ZZZ-model_a5edc336_zzz-app.rules"}, ) - @patch("charm.KubernetesServicePatch", lambda x, y: None) @k8s_resource_multipatch @patch("lightkube.core.client.GenericSyncClient") @patch("prometheus_client.Prometheus.reload_configuration", lambda *_: True) @@ -472,7 +439,6 @@ def test_charm_writes_meaningful_alerts_filename_2(self, *_): {"/etc/prometheus/rules/juju_ZZZ-model_a5edc336_zzz-app.rules"}, ) - @patch("charm.KubernetesServicePatch", lambda x, y: None) @k8s_resource_multipatch @patch("lightkube.core.client.GenericSyncClient") @patch("prometheus_client.Prometheus.reload_configuration", lambda *_: True) @@ -495,7 +461,6 @@ def test_charm_writes_meaningful_alerts_filename_3(self, *_): 
{"/etc/prometheus/rules/juju_remote-model_be44e4b8_remote-app.rules"}, ) - @patch("charm.KubernetesServicePatch", lambda x, y: None) @k8s_resource_multipatch @patch("lightkube.core.client.GenericSyncClient") @patch("prometheus_client.Prometheus.reload_configuration", lambda *_: True) @@ -527,7 +492,6 @@ def raise_if_called(*_, **__): class TestPebblePlan(unittest.TestCase): """Test the pebble plan is kept up-to-date (situational awareness).""" - @patch("charm.KubernetesServicePatch", lambda x, y: None) @k8s_resource_multipatch @patch("lightkube.core.client.GenericSyncClient") @patch("prometheus_client.Prometheus.reload_configuration", lambda *_: True) @@ -592,75 +556,6 @@ def test_no_restart_nor_reload_when_nothing_changes(self, reload_config_patch, * # AND reload is not invoked reload_config_patch.assert_not_called() - @k8s_resource_multipatch - @patch("lightkube.core.client.GenericSyncClient") - @patch("socket.getfqdn", new=lambda *args: "fqdn") - @patch("ops.testing._TestingPebbleClient.replan_services") - @patch("ops.testing._TestingPebbleClient.start_services") - @patch("ops.testing._TestingPebbleClient.restart_services") - @patch("prometheus_client.Prometheus.reload_configuration") - def test_workload_restarts_when_some_config_options_change( - self, reload_config, restart, start, replan, *_ - ): - """Some config options go in as cli args and require workload restart.""" - # GIVEN a pebble plan - first_plan = self.plan - self.assertTrue(self.service.is_running()) - - # WHEN web_external_url is set - self.harness.update_config({"web_external_url": "http://test:80/foo/bar"}) - - # THEN pebble service is updated - second_plan = self.plan - self.assertEqual(cli_arg(second_plan, "--web.external-url"), "http://test:80/foo/bar") - self.assertNotEqual(first_plan.to_dict(), second_plan.to_dict()) - - # AND workload is restarted - self.assertTrue(self.service.is_running()) - self.assertTrue(restart.called or start.called or replan.called) - restart.reset_mock() - start.reset_mock() - replan.reset_mock() - - # BUT reload is not invoked - reload_config.assert_not_called() - - # WHEN web_external_url is changed - self.harness.update_config({"web_external_url": "http://test:80/foo/bar/baz"}) - - # THEN pebble service is updated - third_plan = self.plan - self.assertEqual(cli_arg(third_plan, "--web.external-url"), "http://test:80/foo/bar/baz") - self.assertNotEqual(second_plan.to_dict(), third_plan.to_dict()) - - # AND workload is restarted - self.assertTrue(self.service.is_running()) - self.assertTrue(restart.called or start.called or replan.called) - restart.reset_mock() - start.reset_mock() - replan.reset_mock() - - # BUT reload is not invoked - reload_config.assert_not_called() - - # WHEN web_external_url is unset - self.harness.update_config(unset=["web_external_url"]) - - # THEN pebble service is updated - fourth_plan = self.plan - self.assertEqual(cli_arg(fourth_plan, "--web.external-url"), "http://fqdn:9090") - self.assertNotEqual(third_plan.to_dict(), fourth_plan.to_dict()) - - # AND workload is restarted - self.assertTrue(self.service.is_running()) - self.assertTrue(restart.called or start.called or replan.called) - restart.reset_mock() - start.reset_mock() - replan.reset_mock() - - # BUT reload is not invoked - reload_config.assert_not_called() - @k8s_resource_multipatch @patch("lightkube.core.client.GenericSyncClient") @patch.multiple( @@ -696,7 +591,6 @@ def test_workload_hot_reloads_when_some_config_options_change(self, reload_confi @prom_multipatch class 
TestTlsConfig(unittest.TestCase): - @patch("charm.KubernetesServicePatch", lambda x, y: None) @k8s_resource_multipatch @patch("lightkube.core.client.GenericSyncClient") @prom_multipatch diff --git a/tests/unit/test_charm_status.py b/tests/unit/test_charm_status.py index 1a7a0c83..b7aae061 100644 --- a/tests/unit/test_charm_status.py +++ b/tests/unit/test_charm_status.py @@ -18,7 +18,6 @@ logger = logging.getLogger(__name__) -@patch("charm.KubernetesServicePatch", lambda x, y: None) @prom_multipatch class TestActiveStatus(unittest.TestCase): """Feature: Charm's status should reflect the correctness of the config / relations. diff --git a/tests/unit/test_remote_write.py b/tests/unit/test_remote_write.py index 9cc85e28..5ea16d38 100644 --- a/tests/unit/test_remote_write.py +++ b/tests/unit/test_remote_write.py @@ -227,7 +227,6 @@ def setUp(self, *unused): self.mock_capacity.return_value = "1Gi" self.addCleanup(patcher.stop) - @patch("charm.KubernetesServicePatch", lambda x, y: None) @k8s_resource_multipatch @patch("lightkube.core.client.GenericSyncClient") @patch.object(Prometheus, "reload_configuration", new=lambda _: True) @@ -245,7 +244,6 @@ def test_port_is_set(self, *unused): ) self.assertIsInstance(self.harness.charm.unit.status, ActiveStatus) - @patch("charm.KubernetesServicePatch", lambda x, y: None) @k8s_resource_multipatch @patch("lightkube.core.client.GenericSyncClient") @patch.object(Prometheus, "reload_configuration", new=lambda _: True) @@ -267,7 +265,6 @@ def test_alert_rules(self, *unused): self.assertEqual(len(alerts), 1) self.assertDictEqual(alerts, ALERT_RULES) - @patch("charm.KubernetesServicePatch", lambda x, y: None) @k8s_resource_multipatch @patch("lightkube.core.client.GenericSyncClient") @patch.object(Prometheus, "reload_configuration", new=lambda _: True) diff --git a/tests/unit/test_web_external_url.py b/tests/unit/test_web_external_url.py deleted file mode 100644 index 891b823a..00000000 --- a/tests/unit/test_web_external_url.py +++ /dev/null @@ -1,293 +0,0 @@ -# Copyright 2020 Canonical Ltd. -# See LICENSE file for licensing details. - -import json -import logging -import unittest -from unittest.mock import patch - -import ops -import yaml -from charm import PROMETHEUS_CONFIG, PrometheusCharm -from helpers import cli_arg, k8s_resource_multipatch, patch_network_get, prom_multipatch -from ops.testing import Harness - -ops.testing.SIMULATE_CAN_CONNECT = True -logger = logging.getLogger(__name__) - - -class TestWebExternalUrlForCharm(unittest.TestCase): - """Test that the web_external_url config option is rendered correctly for the charm. - - This entails: - - default job config (the same prom scraping itself via localhost:9090) - - self-scrape job (the requirer side of the prometheus_scrape relation data) - - remote-write url (relation data for the provider side, i.e. 
receive-remote-write) - """ - - def setUp(self, *unused): - self.harness = Harness(PrometheusCharm) - self.addCleanup(self.harness.cleanup) - - pvc_patcher = patch.object(PrometheusCharm, "_get_pvc_capacity") - self.pvc_mock = pvc_patcher.start() - self.addCleanup(pvc_patcher.stop) - self.harness.set_model_name("prometheus_model") - self.pvc_mock.return_value = "1Gi" - - for p in [ - k8s_resource_multipatch, - patch_network_get(), - patch("socket.getfqdn", new=lambda *args: "fqdn"), - patch("charm.KubernetesServicePatch", lambda x, y: None), - patch("lightkube.core.client.GenericSyncClient"), - prom_multipatch, - ]: - p.start() - self.addCleanup(p.stop) - - self.harness.set_leader(True) - - self.rel_id_self_metrics = self.harness.add_relation( - "self-metrics-endpoint", "remote-scraper-app" - ) - self.harness.add_relation_unit(self.rel_id_self_metrics, "remote-scraper-app/0") - - self.rel_id_remote_write = self.harness.add_relation( - "receive-remote-write", "remote-write-app" - ) - self.harness.add_relation_unit(self.rel_id_remote_write, "remote-write-app/0") - - def app_data(self, rel_name: str): - relation = self.harness.charm.model.get_relation(rel_name) - return relation.data[self.harness.charm.app] - - def unit_data(self, rel_name: str): - relation = self.harness.charm.model.get_relation(rel_name) - return relation.data[self.harness.charm.unit] - - @property - def container_name(self): - return self.harness.charm._name - - @property - def plan(self): - return self.harness.get_container_pebble_plan(self.container_name) - - @property - def config_file(self) -> dict: - return yaml.safe_load(self.container.pull(PROMETHEUS_CONFIG).read()) - - def test_web_external_url_not_set(self, *unused): - # GIVEN an initialized charm - # Note: harness does not re-init the charm on core events such as config-changed. - # https://github.com/canonical/operator/issues/736 - # For this reason, repeating the begin_with_initial_hooks() in every test method. - # When operator/736 is implemented, these lines can be moved to setUp(). 
- self.harness.update_config(unset=["web_external_url"]) - self.harness.begin_with_initial_hooks() - self.harness.container_pebble_ready("prometheus") - self.container = self.harness.charm.unit.get_container(self.container_name) - - # WHEN web_external_url is not set - # (This had to be done above, before `begin`, due to operator/736) - - # THEN pebble plan does NOT have the --web.external_url arg set - self.assertEqual(cli_arg(self.plan, "--web.external_url"), None) - - # AND default job is the default localhost:9090/metrics - scrape_config = self.config_file["scrape_configs"][0] - self.assertEqual(scrape_config["static_configs"][0]["targets"], ["fqdn:9090"]) - self.assertEqual(scrape_config["metrics_path"], "/metrics") - - # AND the self-scrape job points to prom's fqdn - self.assertEqual( - self.app_data("self-metrics-endpoint").get("scrape_jobs"), - json.dumps( - [ - { - "metrics_path": "/metrics", - "static_configs": [{"targets": ["*:9090"]}], - "scheme": "http", - } - ] - ), - ) - self.assertEqual( - self.unit_data("self-metrics-endpoint").get("prometheus_scrape_unit_address"), - "fqdn", - ) - - # AND the remote-write provider points to prom's fqdn - self.assertEqual( - self.unit_data("receive-remote-write").get("remote_write"), - '{"url": "http://fqdn:9090/api/v1/write"}', - ) - - def test_web_external_has_hostname_only(self, *unused): - # GIVEN an initialized charm - self.harness.update_config({"web_external_url": "http://foo.bar"}) - self.harness.begin_with_initial_hooks() - self.harness.container_pebble_ready("prometheus") - self.container = self.harness.charm.unit.get_container(self.container_name) - - # WHEN web_external_url is just a hostname - # (This had to be done above, before `begin`, due to operator/736) - - # THEN pebble plan has the --web.external_url set to http://foo.bar - self.assertEqual(cli_arg(self.plan, "--web.external-url"), "http://foo.bar") - - # AND default job is the default localhost:9090/metrics - scrape_config = self.config_file["scrape_configs"][0] - self.assertEqual(scrape_config["static_configs"][0]["targets"], ["fqdn:9090"]) - self.assertEqual(scrape_config["metrics_path"], "/metrics") - - # AND the self-scrape job advertises a wildcard target on port 80 - self.assertEqual( - self.app_data("self-metrics-endpoint").get("scrape_jobs"), - json.dumps( - [ - { - "metrics_path": "/metrics", - "static_configs": [{"targets": ["*:80"]}], - "scheme": "http", - } - ] - ), - ) - self.assertEqual( - self.unit_data("self-metrics-endpoint").get("prometheus_scrape_unit_address"), - "foo.bar", - ) - - # AND the remote-write provider points to prom's fqdn - self.assertEqual( - self.unit_data("receive-remote-write").get("remote_write"), - '{"url": "http://foo.bar/api/v1/write"}', - ) - - def test_web_external_has_hostname_and_port(self, *unused): - # GIVEN an initialized charm - self.harness.update_config({"web_external_url": "http://foo.bar:1234"}) - self.harness.begin_with_initial_hooks() - self.harness.container_pebble_ready("prometheus") - self.container = self.harness.charm.unit.get_container(self.container_name) - - # WHEN web_external_url is a hostname with a port - # (This had to be done above, before `begin`, due to operator/736) - - # THEN pebble plan has the --web.external_url set to http://foo.bar:1234 - self.assertEqual(cli_arg(self.plan, "--web.external-url"), "http://foo.bar:1234") - - # AND default job is the default localhost:9090/metrics - scrape_config = self.config_file["scrape_configs"][0] - 
self.assertEqual(scrape_config["static_configs"][0]["targets"], ["fqdn:9090"]) - self.assertEqual(scrape_config["metrics_path"], "/metrics") - - # AND the self-scrape job advertises a wildcard target on port 1234 - self.assertEqual( - self.app_data("self-metrics-endpoint").get("scrape_jobs"), - json.dumps( - [ - { - "metrics_path": "/metrics", - "static_configs": [{"targets": ["*:1234"]}], - "scheme": "http", - } - ] - ), - ) - self.assertEqual( - self.unit_data("self-metrics-endpoint").get("prometheus_scrape_unit_address"), - "foo.bar", - ) - - # AND the remote-write provider points to prom's fqdn - self.assertEqual( - self.unit_data("receive-remote-write").get("remote_write"), - '{"url": "http://foo.bar:1234/api/v1/write"}', - ) - - def test_web_external_has_hostname_and_path(self, *unused): - # GIVEN an initialized charm - self.harness.update_config({"web_external_url": "http://foo.bar/baz"}) - self.harness.begin_with_initial_hooks() - self.harness.container_pebble_ready("prometheus") - self.container = self.harness.charm.unit.get_container(self.container_name) - - # WHEN web_external_url includes a path - # (This had to be done above, before `begin`, due to operator/736) - - # THEN pebble plan has the --web.external_url set to http://foo.bar/baz - self.assertEqual(cli_arg(self.plan, "--web.external-url"), "http://foo.bar/baz") - - # AND default job is the default localhost:9090/baz/metrics - scrape_config = self.config_file["scrape_configs"][0] - self.assertEqual(scrape_config["static_configs"][0]["targets"], ["fqdn:9090"]) - self.assertEqual(scrape_config["metrics_path"], "/baz/metrics") - - # AND the self-scrape job advertises a wildcard target on port 80 - self.assertEqual( - self.app_data("self-metrics-endpoint").get("scrape_jobs"), - json.dumps( - [ - { - "metrics_path": "/metrics", - "static_configs": [{"targets": ["*:80"]}], - "scheme": "http", - } - ] - ), - ) - self.assertEqual( - self.unit_data("self-metrics-endpoint").get("prometheus_scrape_unit_address"), - "foo.bar", - ) - - # AND the remote-write provider points to prom's fqdn - self.assertEqual( - self.unit_data("receive-remote-write").get("remote_write"), - '{"url": "http://foo.bar/baz/api/v1/write"}', - ) - - def test_web_external_has_hostname_port_and_path(self, *unused): - # GIVEN an initialized charm - self.harness.update_config({"web_external_url": "http://foo.bar:1234/baz"}) - self.harness.begin_with_initial_hooks() - self.harness.container_pebble_ready("prometheus") - self.container = self.harness.charm.unit.get_container(self.container_name) - - # WHEN web_external_url includes a port and a path - # (This had to be done above, before `begin`, due to operator/736) - - # THEN pebble plan has the --web.external_url set to http://foo.bar:1234/baz - self.assertEqual(cli_arg(self.plan, "--web.external-url"), "http://foo.bar:1234/baz") - - # AND default job is the default localhost:9090/baz/metrics - scrape_config = self.config_file["scrape_configs"][0] - self.assertEqual(scrape_config["static_configs"][0]["targets"], ["fqdn:9090"]) - self.assertEqual(scrape_config["metrics_path"], "/baz/metrics") - - # AND the self-scrape job advertises a wildcard target on port 1234 - self.assertEqual( - self.app_data("self-metrics-endpoint").get("scrape_jobs"), - json.dumps( - [ - { - "metrics_path": "/metrics", - "static_configs": [{"targets": ["*:1234"]}], - "scheme": "http", - } - ] - ), - ) - self.assertEqual( - self.unit_data("self-metrics-endpoint").get("prometheus_scrape_unit_address"), - "foo.bar", - ) - - # AND the 
remote-write provider points to prom's fqdn - self.assertEqual( - self.unit_data("receive-remote-write").get("remote_write"), - '{"url": "http://foo.bar:1234/baz/api/v1/write"}', - ) diff --git a/tox.ini b/tox.ini index 1e0f95ce..05e16ef1 100644 --- a/tox.ini +++ b/tox.ini @@ -3,7 +3,7 @@ [tox] skipsdist=True skip_missing_interpreters = True -envlist = lint, static-{charm,lib}, unit +envlist = lint, static-{charm,lib}, unit, scenario [vars] src_path = {toxinidir}/src @@ -86,7 +86,10 @@ commands = allowlist_externals = /usr/bin/env -[testenv:scenario] +# Added a '-disabled' suffix so CI won't fail on scenario tests, due to +# - https://github.com/canonical/ops-scenario/issues/48 +# - https://github.com/canonical/ops-scenario/issues/49 +[testenv:scenario-disabled] description = Scenario tests deps = ops < 2.5.0 # https://github.com/canonical/ops-scenario/issues/48
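
Taken together, the src/charm.py hunks above converge on one pattern: open the workload port through Juju instead of patching the Kubernetes Service, and let the ingress provider strip the per-unit path prefix so the workload keeps serving at its internal URL. The following is a minimal sketch of that wiring, not the complete charm: `MyCharm` and the stub `_is_tls_enabled` are illustrative stand-ins, and it assumes ops >= 2.1 (for `ops.model.OpenedPort` and `Unit.opened_ports()`) plus `traefik_k8s.v1.ingress_per_unit` at LIBPATCH >= 14 (the `scheme` parameter added in this patch).

```python
# Minimal sketch of the wiring introduced by this patch (illustrative,
# not the complete PrometheusCharm).
import socket

from charms.traefik_k8s.v1.ingress_per_unit import IngressPerUnitRequirer
from ops.charm import CharmBase
from ops.model import OpenedPort


class MyCharm(CharmBase):  # hypothetical stand-in for PrometheusCharm
    def __init__(self, *args):
        super().__init__(*args)
        self._port = 9090
        # Replaces the removed KubernetesServicePatch: Juju itself manages
        # the Service ports via open-port/close-port.
        self.set_ports()

        # The ingress provider strips the per-unit path prefix and redirects
        # to HTTPS, so the workload keeps serving at its internal URL.
        self.ingress = IngressPerUnitRequirer(
            self,
            relation_name="ingress",
            port=self._port,
            strip_prefix=True,
            redirect_https=True,
            scheme=lambda: "https" if self._is_tls_enabled() else "http",
        )

    def set_ports(self):
        """Open necessary (and close no longer needed) workload ports."""
        planned_ports = {OpenedPort("tcp", self._port)}
        actual_ports = self.unit.opened_ports()
        # Ports may change across an upgrade, so sync open ports with the plan.
        for p in actual_ports.difference(planned_ports):
            self.unit.close_port(p.protocol, p.port)
        for p in planned_ports.difference(actual_ports):
            self.unit.open_port(p.protocol, p.port)

    def _is_tls_enabled(self) -> bool:
        return False  # stub; the real charm checks its cert handler for a cert

    @property
    def internal_url(self) -> str:
        """Workload URL on the pod's FQDN; also passed to --web.external-url."""
        scheme = "https" if self._is_tls_enabled() else "http"
        return f"{scheme}://{socket.getfqdn()}:{self._port}"
```

With `strip_prefix` set, the ingress provider removes the per-unit path prefix before proxying, which is why `--web.external-url` can stay pinned to `internal_url` and the `web_external_url` config option becomes a no-op.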