diff --git a/README.md b/README.md index 477f2975..ba7e7648 100644 --- a/README.md +++ b/README.md @@ -47,6 +47,8 @@ the same packages, the same developer(s). * Dynamically generated or conditional sub-handlers (an advanced feature). * Timers that tick as long as the resource exists, optionally with a delay since the last change. * Daemons that run as long as the resource exists (in threads or asyncio-tasks). + * Validating and mutating admission webhook (with dev-mode tunneling). + * Live in-memory indexing of resources or their excerpts. * Filtering with stealth mode (no logging): by arbitrary filtering functions, by labels/annotations with values, presence/absence, or dynamic callbacks. * In-memory all-purpose containers to store non-serializable objects for individual resources. diff --git a/docs/admission.rst b/docs/admission.rst new file mode 100644 index 00000000..5033eeb2 --- /dev/null +++ b/docs/admission.rst @@ -0,0 +1,664 @@ +================= +Admission control +================= + +Admission hooks are callbacks from Kubernetes to the operator before +the resources are created or modified. There are two types of hooks: + +* Validating admission webhooks. +* Mutating admission webhooks. + +For more information on the admission webhooks, +see the Kubernetes documentation: `Dynamic Admission Control`__. + +__ https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/ + + +Dependencies +============ + +To minimize Kopf's footprint in production systems, +it does not include heavy-weight dependencies needed only for development, +such as SSL cryptography and certificate generation libraries. +For example, Kopf's footprint with critical dependencies is 8.8 MB, +while ``cryptography`` would add 8.7 MB; ``certbuilder`` adds "only" 2.9 MB. + +To use all features of development-mode admission webhook servers and tunnels, +you have to install Kopf with an extra: + +.. 
code-block:: bash + + pip install kopf[dev] + +If this extra is not installed, Kopf will not generate self-signed certificates +and will run either with HTTP only or with externally provided certificates. + +Also, without this extra, Kopf will not be able to establish Ngrok tunnels. +Though, it will be able to use K3d & Minikube servers with magic hostnames. + +Any attempt to run it in a mode with self-signed certificates or tunnels +will raise a startup-time error with an explanation and suggested actions. + + +Validation handlers +=================== + +.. code-block:: python + + import kopf + + @kopf.on.validate('kopfexamples') + def say_hello(warnings: list[str], **_): + warnings.append("Verified with the operator's hook.") + + @kopf.on.validate('kopfexamples') + def check_numbers(spec, **_): + if not isinstance(spec.get('numbers', []), list): + raise kopf.AdmissionError("Numbers must be a list if present.") + + @kopf.on.validate('kopfexamples') + def convertible_numbers(spec, warnings, **_): + if isinstance(spec.get('numbers', []), list): + for val in spec.get('numbers', []): + if not isinstance(val, float): + try: + float(val) + except ValueError: + raise kopf.AdmissionError(f"Cannot convert {val!r} to a number.") + else: + warnings.append(f"{val!r} is not a number but can be converted.") + + @kopf.on.validate('kopfexamples') + def numbers_range(spec, **_): + if isinstance(spec.get('numbers', []), list): + if not all(0 <= float(val) <= 100 for val in spec.get('numbers', [])): + raise kopf.AdmissionError("Numbers must be below 0..100.", code=499) + +Each handler is mapped to its dedicated admission webhook and an endpoint +so that all handlers are executed in parallel independently of each other. +They must not expect that other checks are already performed by other handlers; +if such logic is needed, make it as one handler with a sequential execution. + + +Mutation handlers +================= + +To mutate the object, modify the :kwarg:`patch`. 
Changes to :kwarg:`body`, +:kwarg:`spec`, etc, will not be remembered (and are not possible): + +.. code-block:: python + + import kopf + + @kopf.on.mutate('kopfexamples') + def ensure_default_numbers(spec, patch, **_): + if 'numbers' not in spec: + patch.spec['numbers'] = [1, 2, 3] + + @kopf.on.mutate('kopfexamples') + def convert_numbers_if_possible(spec, patch, **_): + if 'numbers' in spec and isinstance(spec.get('numbers'), list): + patch.spec['numbers'] = [_maybe_number(v) for v in spec['numbers']] + + def _maybe_number(v): + try: + return float(v) + except ValueError: + return v + +The semantics is the same or as close as possible to the Kubernetes API's one. +``None`` values will remove the relevant keys. + +Under the hood, the patch object will remember each change +and will return a JSONPatch structure to Kubernetes. + + +Handler options +=============== + +Handlers have a limited capability to inform Kubernetes about its behaviour. +The following options are supported: + +``persisted`` (``bool``) webhooks will not be removed from the managed +configurations on exit; non-persisted webhooks will be removed if possible. +Such webhooks will prevent all admissions even when the operator is down. +This option has no effect if there is no managed configuration. +The webhook cleanup only happens on graceful exits; on forced exits, even +non-persisted webhooks might be persisted and block the admissions. + +``operation`` (``str``) will configure this handler/webhook to be called only +for a specific operation. For multiple operations, add several decorators. +Possible values are ``"CREATE"``, ``"UPDATE"``, ``"DELETE"``, ``"CONNECT"``. +The default is ``None``, i.e. all operations (equivalent to ``"*"``). + +``side_effects`` (``bool``) tells Kubernetes that the handler can have side +effects in non-dry-run mode. In dry-run mode, it must have no side effects. +The dry-run mode is passed to the handler as a :kwarg:`dryrun` kwarg. +The default is ``False``, i.e. 
the handler has no side effects. + +``ignore_failures`` (``bool``) marks the webhook as tolerant to errors. +This includes errors of the handler itself (disproved admissions), +so as HTTP/TCP communication errors when apiservers talk to the webhook server. +By default, an inaccessible or rejecting webhook blocks the admission. + +The developers can use regular :doc:`/filters`. In particular, the ``labels`` +will be passed to the webhook configuration as ``.webhooks.*.objectSelector`` +for optimization purposes: so that admissions are not even sent to the webhook +server if it is known that they will be filtered out and therefore allowed. + +Server-side filtering supports everything except callbacks: +i.e., ``"strings"``, ``kopf.PRESENT`` and ``kopf.ABSENT`` markers. +The callbacks will be evaluated after the admission review request is received. + +.. warning:: + + Be careful with the builtin resources and admission hooks. + If a handler is broken or misconfigured, it can prevent creating + those resources, e.g. pods, in the whole cluster. This will render + the cluster unusable until the configuration is manually removed. + + Start the development in local clusters, validating/mutating the custom + resources first, and enable ``ignore_errors`` initially. + Enable the strict mode of the handlers only when stabilised. + + +In-memory containers +==================== + +Kopf provides :doc:`/memos` for each resource. However, webhooks can happen +before a resource is created. This affects how the memos work. + +For update and deletion requests, the actual memos of the resources are used. + +For the admission requests on resource creation, a memo is created and discarded +immediately. It means that the creation's memos are useless at the moment. 
+ +This can change in the future: the memos of resource creation attempts +will be preserved for a limited but short time (configurable), +so that the values could be shared between the admission and the handling, but +so that there are no memory leaks if the resource never succeeds in admission. + + +Admission warnings +================== + +Starting with Kubernetes 1.19 (check with ``kubectl version``), +admission warnings can be returned from admission handlers. + +To populate warnings, accept a **mutable** :kwarg:`warnings` (``list[str]``) +and add strings to it: + +.. code-block:: python + + import kopf + + @kopf.on.validate('kopfexamples') + def ensure_default_numbers(spec, warnings: list[str], **_): + if spec.get('field') == 'value': + warnings.append("The default value is used. It is okay but worth changing.") + +The admission warnings look like this (requires kubectl 1.19+): + +.. code-block:: none + + $ kubectl create -f examples/obj.yaml + Warning: The default value is used. It is okay but worth changing. + kopfexample.kopf.dev/kopf-example-1 created + +.. note:: + + Despite Kopf's intention to utilise Python's native features that + semantically map to Kubernetes's or operators' features, + Python StdLib's :mod:`warnings` is not used for admission warnings + (the initial idea was to catch `UserWarning` and ``warnings.warn("...")`` + calls and return them as admission warnings). + + The StdLib's module is documented as thread-unsafe (therefore, task-unsafe) + and requires hacking the global state which might affect other threads + and/or tasks -- there is no clear way to do this consistently. + + This may be revised in the future and provided as an additional feature. + + +Admission errors +================ + +Unlike with regular handlers and their error handling logic (:doc:`/errors`), +the webhooks cannot do retries or backoffs. So, the ``backoff=``, ``errors=``, +``retries=``, ``timeout=`` options are not accepted on the admission handlers. 
`kopf.PermanentError` and `kopf.TemporaryError` are treated the same as any
other exception: the admission is denied with the exception's message
(no retries or backoffs are performed for admission handlers).
code-block:: python + + @kopf.on.startup() + def configure(settings: kopf.OperatorSettings, **_): + settings.admission.managed = 'auto.kopf.dev' + +Multiple records for webhooks will be added or removed for multiple resources +to those configuration objects as needed. Existing records will be overwritten. +If the configuration resource is absent, it will be created +(but at most one for validating and one for mutating configurations). + +Kopf manages the webhook configurations according to how Kopf itself believes +it is sufficient to achieve the goal. Many available Kubernetes features +are not covered by this management. To use these features and control +the configuration with precision, operator developers can disable +the automated management and take care of the configuration manually. + + +Servers and tunnels +=================== + +Kubernetes admission webhooks are designed to be passive rather than active +(from the operator's point of view; vice versa from Kubernetes's point of view). +It means, the webhooks must passively wait for requests via an HTTPS endpoint. +There is currently no official way how an operator can actively pull or poll +the admission requests and send the responses back +(as it is done for all other resource changes streamed via the Kubernetes API). + +It is typically non-trivial to forward the requests from a remote or isolated +cluster to a local host machine where the operator is running for development. + +However, one of Kopf's main promises is to work the same way both in-cluster +and on the developers' machines. It cannot be made "the same way" for webhooks, +but Kopf attempts to make these modes similar to each other code-wise. + +To fulfil its promise, Kopf delegates this task to webhook servers and tunnels, +which are capable of receiving the webhook requests, marshalling them +to the handler callbacks, and then returning the results to Kubernetes. 
+ +Due to numerous ways of how the development and production environments can be +configured, Kopf does not provide a default configuration for a webhook server, +so it must be set by the developer: + +.. code-block:: python + + @kopf.on.startup() + def configure(settings: kopf.OperatorSettings, **_): + if os.environ.get('ENVIRONMENT') is None: + # Only as an example: + settings.admission.server = kopf.WebhookK3dServer(port=54321) + settings.admission.managed = 'auto.kopf.dev' + else: + # Assuming that the configuration is done manually: + settings.admission.server = kopf.WebhookServer(addr='0.0.0.0', port=8080) + settings.admission.managed = 'auto.kopf.dev' + +If there are admission handlers present and no webhook server/tunnel configured, +the operator will fail at startup with an explanatory message. + +Kopf provides several webhook servers and tunnels out of the box, +each with its configuration parameters (see their descriptions): + +*Webhook servers* listen on an HTTPS port locally and handle requests. + +* `kopf.WebhookServer` is helpful for local development and ``curl`` and + a Kubernetes cluster that runs directly on the host machine and can access it. + It is also used internally by most tunnels for a local target endpoint. +* `kopf.WebhookK3dServer` is for local K3d/K3s clusters (even in a VM), + accessing the server via a magical hostname ``host.k3d.internal``. +* `kopf.WebhookMinikubeServer` for local Minikube clusters (even in a VM), + accessing the server via a magical hostname ``host.minikube.internal``. + +*Webhook tunnels* forward the webhook requests through external endpoints +usually to a locally running *webhook server*. + +* `kopf.WebhookNgrokTunnel` established a tunnel through ngrok_. +* `kopf.WebhookInletsTunnel` tunnels the traffic through inlets_. + +.. _ngrok: https://ngrok.com/ +.. _inlets: https://inlets.dev/ + +.. note:: + External tunnelling services usually limit the number of requests. 
+ For example, ngrok has a limit of 40 requests per minute on a free plan. + + The services also usually provide paid subscriptions to overcome that limit. + It might be a wise idea to support the service you rely on with some money. + If that is not an option, you can implement free tunnelling your way. + +.. note:: + A reminder: using development-mode tunnels and self-signed certificates + requires an extra: ``pip install kopf[dev]``. + + +Authenticate apiservers +======================= + +There are many ways how webhook clients (Kubernetes's apiservers) +can authenticate against webhook servers (the operator's webhooks), +and even more ways to validate the supplied credentials. + +More on that, apiservers cannot be configured to authenticate against +webhooks dynamically at runtime, as `this requires control-plane configs`__, +which are out of reach of Kopf. + +__ https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#authenticate-apiservers + +For simplicity, Kopf does not authenticate webhook clients. + +However, Kopf's built-in webhook servers & tunnels extract the very basic +request information and pass it to the admission handlers +for additional verifications and possibly for authentification: + +* :kwarg:`headers` (``Mapping[str, str]``) contains all HTTPS headers, + including ``Authorization: Basic ...``, ``Authorization: Bearer ...``. +* :kwarg:`sslpeer` (``Mapping[str, Any]``) contains the SSL peer information + as returned by `ssl.SSLSocket.getpeercert` or ``None`` if no proper SSL + certificate is provided by a client (i.e. by apiservers talking to webhooks). + +An example of headers: + +.. code-block:: python + + {'Host': 'localhost:54321', + 'Authorization': 'Basic dXNzc2VyOnBhc3Nzdw==', # base64("ussser:passsw") + 'Content-Length': '844', + 'Content-Type': 'application/x-www-form-urlencoded'} + +An example of a self-signed peer certificate presented to ``sslpeer``: + +.. 
code-block:: python + + {'subject': ((('commonName', 'Example Common Name'),), + (('emailAddress', 'example@kopf.dev'),)), + 'issuer': ((('commonName', 'Example Common Name'),), + (('emailAddress', 'example@kopf.dev'),)), + 'version': 1, + 'serialNumber': 'F01984716829537E', + 'notBefore': 'Mar 7 17:12:20 2021 GMT', + 'notAfter': 'Mar 7 17:12:20 2022 GMT'} + +To reproduce this without configuring apiservers: + +.. code-block:: bash + + openssl req -x509 -newkey rsa:2048 -keyout client-key.pem -out client-cert.pem -days 365 -nodes + # Country Name (2 letter code) []: + # State or Province Name (full name) []: + # Locality Name (eg, city) []: + # Organization Name (eg, company) []: + # Organizational Unit Name (eg, section) []: + # Common Name (eg, fully qualified host name) []:Example Common Name + # Email Address []:example@kopf.dev + +.. code-block:: python + + import kopf + + @kopf.on.startup() + def config(settings: kopf.OperatorSettings, **_): + settings.admission.managed = 'auto.kopf.dev' + settings.admission.server = kopf.WebhookServer(cafile='client-cert.pem') + + @kopf.on.validate('kex') + def show_auth(headers, sslpeer, **_): + print(f'{headers=}') + print(f'{sslpeer=}') + +.. 
code-block:: bash + + cat >review.json << EOF + { + "kind": "AdmissionReview", + "apiVersion": "admission.k8s.io/v1", + "request": { + "uid": "1ca13837-ad60-4c9e-abb8-86f29d6c0e84", + "kind": {"group": "kopf.dev", "version": "v1", "kind": "KopfExample"}, + "resource": {"group": "kopf.dev", "version": "v1", "resource": "kopfexamples"}, + "requestKind": {"group": "kopf.dev", "version": "v1", "kind": "KopfExample"}, + "requestResource": {"group": "kopf.dev", "version": "v1", "resource": "kopfexamples"}, + "name": "kopf-example-1", + "namespace": "default", + "operation": "CREATE", + "userInfo": {"username": "admin", "uid": "admin", "groups": ["system:masters", "system:authenticated"]}, + "object": { + "apiVersion": "kopf.dev/v1", + "kind": "KopfExample", + "metadata": {"name": "kopf-example-1", "namespace": "default"} + }, + "oldObject": null, + "dryRun": true + } + } + EOF + +.. code-block:: bash + + curl --insecure --cert client-cert.pem --key client-key.pem https://ussser:passsw@localhost:54321 -d @review.json + # {"apiVersion": "admission.k8s.io/v1", "kind": "AdmissionReview", + # "response": {"uid": "1ca13837-ad60-4c9e-abb8-86f29d6c0e84", + # "allowed": true, + # "warnings": ["SSL peer is Example Common Name."]}} + +When and if needed, the operator developers can implement their servers/tunnels +with their customised authentication methods. + + +Debugging with SSL +================== + +Kubernetes requires that the webhook URLs are always HTTPS, never HTTP. +For this reason, Kopf runs the webhook servers/tunnels with HTTPS by default. + +If a webhook server is configured without a server certificate, +a self-signed certificate is generated at startup, and only HTTPS is served. + +.. code-block:: python + + @kopf.on.startup() + def config(settings: kopf.OperatorSettings, **_): + settings.admission.server = kopf.WebhookServer() + +That endpoint can be accessed directly with ``curl``: + +.. 
code-block:: bash + + curl --insecure https://localhost:54321 -d @review.json + +It is possible to store the generated certificate itself and use as a CA: + +.. code-block:: python + + @kopf.on.startup() + def config(settings: kopf.OperatorSettings, **_): + settings.admission.server = kopf.WebhookServer(cadump='selfsigned.pem') + +.. code-block:: bash + + curl --cacert selfsigned.pem https://localhost:54321 -d @review.json + +For production, a properly generated certificate should be used. +The CA, if not specified, is assumed to be in the default trust chain. +This applies to all servers: `kopf.WebhookServer`, `kopf.WebhookK3dServer`, etc. + +.. code-block:: python + + @kopf.on.startup() + def config(settings: kopf.OperatorSettings, **_): + settings.admission.server = kopf.WebhookServer( + cafile='ca.pem', # or cadata, or capath. + certfile='cert.pem', + pkeyfile='pkey.pem', + password='...') # for the private key, if used. + +.. note:: + ``cadump`` (output) can be used together with ``cafile``/``cadata`` (input), + though it will be the exact copy of the CA and does not add any benefit. + +As a last resort, if SSL is still a problem, it can be disabled and an insecure +HTTP server can be used. This does not work with Kubernetes but can be used +for direct access during development; it is also used by some tunnels that +do not support HTTPS tunnelling (or require paid subscriptions): + +.. code-block:: python + + @kopf.on.startup() + def config(settings: kopf.OperatorSettings, **_): + settings.admission.server = kopf.WebhookServer(insecure=True) + + +Custom servers/tunnels +====================== + +Operator developers can provide their custom servers and tunnels by implementing +an async iterator over client configs (`kopf.WebhookClientConfig`). +There are two ways to implement servers/tunnels. + +One is a simple but non-configurable coroutine: + +.. code-block:: python + + async def mytunnel(fn: kopf.WebhookFn) -> AsyncIterator[kopf.WebhookClientConfig]: + ... 
+ yield client_config + await asyncio.Event().wait() + + @kopf.on.startup() + def configure(settings: kopf.OperatorSettings, **_): + settings.admission.server = mytunnel # no arguments! + +Another one is a slightly more complex but configurable class: + +.. code-block:: python + + class MyTunnel: + async def __call__(self, fn: kopf.WebhookFn) -> AsyncIterator[kopf.WebhookClientConfig]: + ... + yield client_config + await asyncio.Event().wait() + + @kopf.on.startup() + def configure(settings: kopf.OperatorSettings, **_): + settings.admission.server = MyTunnel() # arguments are possible. + +The iterator MUST accept a positional argument of type `kopf.WebhookFn` +and call it with the JSON-parsed payload when a review request is received; +then, it MUST ``await`` the result and JSON-serialize it as a review response: + +.. code-block:: python + + response = await fn(request) + +Optionally (though highly recommended), several keyword arguments can be passed +to extend the request data (if not passed, they all use ``None`` by default): + +* ``webhook`` (``str``) -- to execute only one specific handler/webhook. + The id usually comes from the URL, which the framework injects automatically. + It is highly recommended to provide at least this hint: + otherwise, all admission handlers are executed, with mutating and validating + handlers mixed, which can lead to mutating patches returned for validation + requests, which in turn will fail the admission on the Kubernetes side. +* ``headers`` (``Mapping[str, str]``) -- the HTTPS headers of the request + are passed to handlers as :kwarg:`headers` and can be used for authentication. +* ``sslpeer`` (``Mapping[str, Any]``) -- the SSL peer information taken from + the client certificate (if provided and if verified); it is passed + to handlers as :kwarg:`sslpeer` and can be used for authentication. + +.. 
code-block:: python + + response = await fn(request, webhook=handler_id, headers=headers, sslpeer=sslpeer) + +There is no guarantee on what is happening in the callback and how it works. +The exact implementation can be changed in the future without warning: e.g., +the framework can either invoke the admission handlers directly in the callback +or queue the request for a background execution and return an awaitable future. + +The iterator must yield one or more client configs. Configs are dictionaries +that go to the managed webhook configurations as ``.webhooks.*.clientConfig``. + +Regardless of how the client config is created, the framework extends the URLs +in the ``url`` and ``service.path`` fields with the handler/webhook ids, +so that a URL ``https://myhost/path`` becomes ``https://myhost/path/handler1``, +``https://myhost/path/handler2``, so on. + +Remember: Kubernetes prohibits using query parameters and fragments in the URLs. + +In most cases, only one yielded config is enough if the server is going +to serve the requests at the same endpoint. +In rare cases when the endpoint changes over time (e.g. for dynamic tunnels), +the server/tunnel should yield a new config every time the endpoint changes, +and the webhook manager will reconfigure all managed webhooks accordingly. + +The server/tunnel must hold control by running the server or by sleeping. +To sleep forever, use ``await asyncio.Event().wait()``. If the server/tunnel +exits unexpectedly, this causes the whole operator to exit. + +If the goal is to implement a tunnel only, but not a custom webhook server, +it is highly advised to inherit from or to directly use `kopf.WebhookServer` +to run a locally listening endpoint. This server implements all URL parsing +and request handling logic well-aligned with the rest of the framework: + +.. 
code-block:: python + + # Inheritance: + class MyTunnel1(kopf.WebhookServer): + async def __call__(self, fn: kopf.WebhookFn) -> AsyncIterator[kopf.WebhookClientConfig]: + ... + for client_config in super().__call__(fn): + ... # renew a tunnel, adjust the config + yield client_config + + # Composition: + class MyTunnel2: + async def __call__(self, fn: kopf.WebhookFn) -> AsyncIterator[kopf.WebhookClientConfig]: + server = kopf.WebhookServer(...) + for client_config in server(fn): + ... # renew a tunnel, adjust the config + yield client_config + +.. seealso:: + For reference implementations of servers and tunnels, + see the `provided webhooks`__. + +__ https://github.com/nolar/kopf/blob/master/kopf/toolkits/webhooks.py diff --git a/docs/deployment-rbac.yaml b/docs/deployment-rbac.yaml index 1cb3198e..8b4ef585 100644 --- a/docs/deployment-rbac.yaml +++ b/docs/deployment-rbac.yaml @@ -24,6 +24,11 @@ rules: resources: [namespaces] verbs: [list, watch] + # Framework: admission webhook configuration management. + - apiGroups: [admissionregistration.k8s.io/v1, admissionregistration.k8s.io/v1beta1] + resources: [validatingwebhookconfigurations, mutatingwebhookconfigurations] + verbs: [create, patch] + # Application: read-only access for watching cluster-wide. - apiGroups: [kopf.dev] resources: [kopfexamples] diff --git a/docs/index.rst b/docs/index.rst index b976d9fb..cf79ae16 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -39,6 +39,7 @@ Kopf: Kubernetes Operators Framework scopes memos indexing + admission .. toctree:: :maxdepth: 2 diff --git a/docs/kwargs.rst b/docs/kwargs.rst index 820e98d1..f082a116 100644 --- a/docs/kwargs.rst +++ b/docs/kwargs.rst @@ -261,3 +261,75 @@ Its ``.wait()`` method can be used to replace ``time.sleep()`` or ``asyncio.sleep()`` for faster (instant) termination on resource deletion. See more: :doc:`daemons`. + + +Resource admission kwargs +========================= + +.. 
kwarg:: dryrun + +Dry run +------- + +Admission handlers, both validating and mutating, must skip any side effects +if ``dryrun`` is ``True``. It is ``True`` when a dry-run API request is made, +e.g. with ``kubectl --dry-run=server ...``. + +Regardless of ``dryrun`, the handlers must not make any side effects +unless they declare themselves as ``side_effects=True``. + +See more: :doc:`admission`. + + +.. kwarg:: warnings + +Admission warnings +------------------ + +``warnings`` (``list[str]``) is a **mutable** list of string used as warnings. +The admission webhook handlers can populate the list with warnings (strings), +and the webhook servers/tunnels return them to Kubernetes, which shows them +to ``kubectl``. + +See more: :doc:`admission`. + + +.. kwarg:: userinfo + +User information +---------------- + +``userinfo`` (``Mapping[str, Any]``) is an information about a user that +sends the API request to Kubernetes. + +It usually contains the keys ``'username'``, ``'uid'``, ``'groups'``, +but this might change in the future. The information is provided exactly +as Kubernetes sends it in the admission request. + +See more: :doc:`admission`. + + +.. kwarg:: headers +.. kwarg:: sslpeer + +Request credentials +------------------- + +For rudimentary authentication and authorization, Kopf passes the information +from the admission requests to the admission handlers as is, +without additional interpretation of it. + +``headers`` (``Mapping[str, str]``) contains all HTTPS request headers, +including ``Authorization: Basic ...``, ``Authorization: Bearer ...``. + +``sslpeer`` (``Mapping[str, Any]``) contains the SSL peer information +as returned by `ssl.SSLSocket.getpeercert`. It is ``None`` if no proper +SSL client certificate was provided (i.e. by apiservers talking to webhooks), +or if the SSL protocol could not verify the provided certificate with its CA. + +.. 
note:: + This is an identity of the apiservers that send the admission request, + not of the user or an app that sends the API request to Kubernetes. + For the user's identity, use :kwarg:`userinfo`. + +See more: :doc:`admission`. diff --git a/examples/17-admission/example.py b/examples/17-admission/example.py new file mode 100644 index 00000000..dcba7a4a --- /dev/null +++ b/examples/17-admission/example.py @@ -0,0 +1,59 @@ +import pathlib +from typing import Dict + +import kopf + + +@kopf.on.startup() +def config(settings: kopf.OperatorSettings, **_): + ROOT = (pathlib.Path.cwd() / pathlib.Path(__file__)).parent.parent.parent + settings.admission.managed = 'auto.kopf.dev' + settings.admission.server = kopf.WebhookK3dServer(cadump=ROOT/'ca.pem') + ## Other options (see the docs): + # settings.admission.server = kopf.WebhookServer() + # settings.admission.server = kopf.WebhookServer(certfile=ROOT/'cert.pem', pkeyfile=ROOT/'key.pem', port=1234) + # settings.admission.server = kopf.WebhookK3dServer(cadump=ROOT/'ca.pem') + # settings.admission.server = kopf.WebhookK3dServer(certfile=ROOT/'k3d-cert.pem', pkeyfile=ROOT/'k3d-key.pem', port=1234) + # settings.admission.server = kopf.WebhookMinikubeServer(port=1234, cadump=ROOT/'ca.pem', verify_cafile=ROOT/'client-cert.pem') + # settings.admission.server = kopf.WebhookNgrokTunnel() + # settings.admission.server = kopf.WebhookNgrokTunnel(binary="/usr/local/bin/ngrok", token='...', port=1234) + # settings.admission.server = kopf.WebhookNgrokTunnel(binary="/usr/local/bin/ngrok", port=1234, path='/xyz', region='eu') + + +@kopf.on.validate('kex') +def authhook(headers, sslpeer, warnings, **_): + # print(f'headers={headers}') + # print(f'sslpeer={sslpeer}') + if not sslpeer: + warnings.append("SSL peer is not identified.") + else: + common_name = None + for key, val in sslpeer['subject'][0]: + if key == 'commonName': + common_name = val + break + else: + warnings.append("SSL peer's common name is absent.") + if common_name is not 
None: + warnings.append(f"SSL peer is {common_name}.") + + +@kopf.on.validate('kex') +def validate1(spec, dryrun, **_): + if not dryrun and spec.get('field') == 'wrong': + raise kopf.AdmissionError("Meh! I don't like it. Change the field.") + + +@kopf.on.validate('kex', field='spec.field', value='not-allowed') +def validate2(**_): + raise kopf.AdmissionError("I'm too lazy anyway. Go away!", code=555) + + +@kopf.on.mutate('kex', labels={'somelabel': 'somevalue'}) +def mutate1(patch: kopf.Patch, **_): + patch.spec['injected'] = 123 + + +# Marks for the e2e tests (see tests/e2e/test_examples.py): +# We do not care: pods can have 6-10 updates here. +E2E_SUCCESS_COUNTS = {} # type: Dict[str, int] diff --git a/examples/99-all-at-once/example.py b/examples/99-all-at-once/example.py index a7e0acf1..f17ec3a3 100644 --- a/examples/99-all-at-once/example.py +++ b/examples/99-all-at-once/example.py @@ -17,7 +17,7 @@ E2E_CREATION_STOP_WORDS = ['Creation is processed:'] E2E_DELETION_STOP_WORDS = ['Deleted, really deleted'] E2E_SUCCESS_COUNTS = {'create_1': 1, 'create_2': 1, 'create_pod': 1, 'delete': 1, 'startup_fn_simple': 1, 'startup_fn_retried': 1, 'cleanup_fn': 1} -E2E_FAILURE_COUNTS: Dict[str, int] = {} +E2E_FAILURE_COUNTS = {} # type: Dict[str, int] E2E_TRACEBACKS = True diff --git a/kopf/__init__.py b/kopf/__init__.py index 4d7a44eb..eb978ef1 100644 --- a/kopf/__init__.py +++ b/kopf/__init__.py @@ -32,6 +32,9 @@ from kopf.reactor import ( lifecycles, # as a separate name on the public namespace ) +from kopf.reactor.admission import ( + AdmissionError, +) from kopf.reactor.handling import ( TemporaryError, PermanentError, @@ -132,6 +135,16 @@ Resource, EVERYTHING, ) +from kopf.structs.reviews import ( + WebhookClientConfigService, + WebhookClientConfig, + Operation, + UserInfo, + Headers, + SSLPeer, + WebhookFn, + WebhookServerProtocol, +) from kopf.toolkits.hierarchies import ( adopt, label, @@ -140,6 +153,12 @@ append_owner_reference, remove_owner_reference, ) +from 
kopf.toolkits.webhooks import ( + WebhookServer, + WebhookK3dServer, + WebhookMinikubeServer, + WebhookNgrokTunnel, +) from kopf.utilities.piggybacking import ( login_via_pykube, login_via_client, @@ -160,6 +179,19 @@ 'build_object_reference', 'build_owner_reference', 'append_owner_reference', 'remove_owner_reference', 'ErrorsMode', + 'AdmissionError', + 'WebhookClientConfigService', + 'WebhookClientConfig', + 'Operation', + 'UserInfo', + 'Headers', + 'SSLPeer', + 'WebhookFn', + 'WebhookServerProtocol', + 'WebhookServer', + 'WebhookK3dServer', + 'WebhookMinikubeServer', + 'WebhookNgrokTunnel', 'PermanentError', 'TemporaryError', 'HandlerTimeoutError', diff --git a/kopf/clients/creating.py b/kopf/clients/creating.py new file mode 100644 index 00000000..44576e9a --- /dev/null +++ b/kopf/clients/creating.py @@ -0,0 +1,34 @@ +from typing import Optional, cast + +from kopf.clients import auth, errors +from kopf.structs import bodies, references + + +@auth.reauthenticated_request +async def create_obj( + *, + resource: references.Resource, + namespace: references.Namespace = None, + name: Optional[str] = None, + body: Optional[bodies.RawBody] = None, + context: Optional[auth.APIContext] = None, # injected by the decorator +) -> Optional[bodies.RawBody]: + """ + Create a resource. 
+ """ + if context is None: + raise RuntimeError("API instance is not injected by the decorator.") + + body = body if body is not None else {} + if namespace is not None: + body.setdefault('metadata', {}).setdefault('namespace', namespace) + if name is not None: + body.setdefault('metadata', {}).setdefault('name', name) + + namespace = cast(references.Namespace, body.get('metadata', {}).get('namespace')) + response = await context.session.post( + url=resource.get_url(server=context.server, namespace=namespace), + json=body, + ) + created_body: bodies.RawBody = await errors.parse_response(response) + return created_body diff --git a/kopf/clients/errors.py b/kopf/clients/errors.py index 5900660b..eaa41b2c 100644 --- a/kopf/clients/errors.py +++ b/kopf/clients/errors.py @@ -101,6 +101,10 @@ class APINotFoundError(APIError): pass +class APIConflictError(APIError): + pass + + async def check_response( response: aiohttp.ClientResponse, ) -> None: @@ -124,6 +128,7 @@ async def check_response( APIUnauthorizedError if response.status == 401 else APIForbiddenError if response.status == 403 else APINotFoundError if response.status == 404 else + APIConflictError if response.status == 409 else APIError ) diff --git a/kopf/on.py b/kopf/on.py index 24ee0931..144fced5 100644 --- a/kopf/on.py +++ b/kopf/on.py @@ -14,12 +14,13 @@ def creation_handler(**kwargs): from typing import Any, Callable, Optional, Union from kopf.reactor import handling, registries -from kopf.structs import callbacks, dicts, filters, handlers, references +from kopf.structs import callbacks, dicts, filters, handlers, references, reviews ActivityDecorator = Callable[[callbacks.ActivityFn], callbacks.ActivityFn] ResourceIndexingDecorator = Callable[[callbacks.ResourceIndexingFn], callbacks.ResourceIndexingFn] ResourceWatchingDecorator = Callable[[callbacks.ResourceWatchingFn], callbacks.ResourceWatchingFn] ResourceChangingDecorator = Callable[[callbacks.ResourceChangingFn], callbacks.ResourceChangingFn] 
+ResourceWebhookDecorator = Callable[[callbacks.ResourceWebhookFn], callbacks.ResourceWebhookFn] ResourceDaemonDecorator = Callable[[callbacks.ResourceDaemonFn], callbacks.ResourceDaemonFn] ResourceTimerDecorator = Callable[[callbacks.ResourceTimerFn], callbacks.ResourceTimerFn] @@ -134,6 +135,118 @@ def decorator( # lgtm[py/similar-function] return decorator +def validate( # lgtm[py/similar-function] + # Resource type specification: + __group_or_groupversion_or_name: Optional[Union[str, references.Marker]] = None, + __version_or_name: Optional[Union[str, references.Marker]] = None, + __name: Optional[Union[str, references.Marker]] = None, + *, + group: Optional[str] = None, + version: Optional[str] = None, + kind: Optional[str] = None, + plural: Optional[str] = None, + singular: Optional[str] = None, + shortcut: Optional[str] = None, + category: Optional[str] = None, + # Handler's behaviour specification: + id: Optional[str] = None, + param: Optional[Any] = None, + operation: Optional[reviews.Operation] = None, # -> .webhooks.*.rules.*.operations[0] + persistent: Optional[bool] = None, + side_effects: Optional[bool] = None, # -> .webhooks.*.sideEffects + ignore_failures: Optional[bool] = None, # -> .webhooks.*.failurePolicy=Ignore + # Resource object specification: + labels: Optional[filters.MetaFilter] = None, + annotations: Optional[filters.MetaFilter] = None, + when: Optional[callbacks.WhenFilterFn] = None, + field: Optional[dicts.FieldSpec] = None, + value: Optional[filters.ValueFilter] = None, + # Operator specification: + registry: Optional[registries.OperatorRegistry] = None, +) -> ResourceWebhookDecorator: + """ ``@kopf.on.validate()`` handler for validating admission webhooks. 
""" + def decorator( # lgtm[py/similar-function] + fn: callbacks.ResourceWebhookFn, + ) -> callbacks.ResourceWebhookFn: + _warn_conflicting_values(field, value) + _verify_filters(labels, annotations) + real_registry = registry if registry is not None else registries.get_default_registry() + real_field = dicts.parse_field(field) or None # to not store tuple() as a no-field case. + real_id = registries.generate_id(fn=fn, id=id, suffix=".".join(real_field or [])) + selector = references.Selector( + __group_or_groupversion_or_name, __version_or_name, __name, + group=group, version=version, + kind=kind, plural=plural, singular=singular, shortcut=shortcut, category=category, + ) + handler = handlers.ResourceWebhookHandler( + fn=fn, id=real_id, param=param, + errors=None, timeout=None, retries=None, backoff=None, # TODO: add some meaning later + selector=selector, labels=labels, annotations=annotations, when=when, + field=real_field, value=value, + reason=handlers.WebhookType.VALIDATING, operation=operation, + persistent=persistent, side_effects=side_effects, ignore_failures=ignore_failures, + ) + real_registry._resource_webhooks.append(handler) + return fn + return decorator + + +def mutate( # lgtm[py/similar-function] + # Resource type specification: + __group_or_groupversion_or_name: Optional[Union[str, references.Marker]] = None, + __version_or_name: Optional[Union[str, references.Marker]] = None, + __name: Optional[Union[str, references.Marker]] = None, + *, + group: Optional[str] = None, + version: Optional[str] = None, + kind: Optional[str] = None, + plural: Optional[str] = None, + singular: Optional[str] = None, + shortcut: Optional[str] = None, + category: Optional[str] = None, + # Handler's behaviour specification: + id: Optional[str] = None, + param: Optional[Any] = None, + operation: Optional[reviews.Operation] = None, # -> .webhooks.*.rules.*.operations[0] + persistent: Optional[bool] = None, + side_effects: Optional[bool] = None, # -> .webhooks.*.sideEffects 
+ ignore_failures: Optional[bool] = None, # -> .webhooks.*.failurePolicy=Ignore + # Resource object specification: + labels: Optional[filters.MetaFilter] = None, + annotations: Optional[filters.MetaFilter] = None, + when: Optional[callbacks.WhenFilterFn] = None, + field: Optional[dicts.FieldSpec] = None, + value: Optional[filters.ValueFilter] = None, + # Operator specification: + registry: Optional[registries.OperatorRegistry] = None, +) -> ResourceWebhookDecorator: + """ ``@kopf.on.mutate()`` handler for mutating admission webhooks. """ + def decorator( # lgtm[py/similar-function] + fn: callbacks.ResourceWebhookFn, + ) -> callbacks.ResourceWebhookFn: + _warn_conflicting_values(field, value) + _verify_filters(labels, annotations) + real_registry = registry if registry is not None else registries.get_default_registry() + real_field = dicts.parse_field(field) or None # to not store tuple() as a no-field case. + real_id = registries.generate_id(fn=fn, id=id, suffix=".".join(real_field or [])) + selector = references.Selector( + __group_or_groupversion_or_name, __version_or_name, __name, + group=group, version=version, + kind=kind, plural=plural, singular=singular, shortcut=shortcut, category=category, + ) + handler = handlers.ResourceWebhookHandler( + fn=fn, id=real_id, param=param, + errors=None, timeout=None, retries=None, backoff=None, # TODO: add some meaning later + selector=selector, labels=labels, annotations=annotations, when=when, + field=real_field, value=value, + reason=handlers.WebhookType.MUTATING, operation=operation, + persistent=persistent, side_effects=side_effects, ignore_failures=ignore_failures, + ) + real_registry._resource_webhooks.append(handler) + return fn + return decorator + + def resume( # lgtm[py/similar-function] # Resource type specification: __group_or_groupversion_or_name: Optional[Union[str, references.Marker]] = None, diff --git a/kopf/reactor/admission.py b/kopf/reactor/admission.py new file mode 100644 index 00000000..cf74c3ea --- 
/dev/null +++ b/kopf/reactor/admission.py @@ -0,0 +1,443 @@ +import asyncio +import base64 +import copy +import json +import logging +import re +import urllib.parse +from typing import Any, Collection, Dict, Iterable, List, Mapping, Optional + +from typing_extensions import Literal, TypedDict + +from kopf.clients import creating, errors, patching +from kopf.engines import loggers +from kopf.reactor import causation, handling, lifecycles, registries +from kopf.storage import states +from kopf.structs import bodies, configuration, containers, ephemera, filters, \ + handlers, patches, primitives, references, reviews + +logger = logging.getLogger(__name__) + + +class AdmissionError(handling.PermanentError): + """ + Raised by admission handlers when an API operation under check is bad. + + An admission error behaves the same as `kopf.PermanentError`, but provides + admission-specific payload for the response: a message & a numeric code. + + This error type is preferred when selecting only one error to report back + to apiservers as the admission review result -- in case multiple handlers + are called in one admission request, i.e. when the webhook endpoints + are not mapped to the handler ids (e.g. when configured manually). + """ + def __init__( + self, + message: Optional[str] = '', + code: Optional[int] = 500, + ) -> None: + super().__init__(message) + self.code = code + + +class WebhookError(Exception): + """ + Raised when a webhook request is bad, not an API operation under check. + """ + + +class MissingDataError(WebhookError): + """ An admission is requested but some expected data are missing. """ + + +class UnknownResourceError(WebhookError): + """ An admission is made for a resource that the operator does not have. """ + + +class AmbiguousResourceError(WebhookError): + """ An admission is made for one resource, but we (somehow) found a few. 
""" + + +async def serve_admission_request( + # Required for all webhook servers, meaningless without it: + request: reviews.Request, + *, + # Optional for webhook servers that can recognise this information: + headers: Optional[Mapping[str, str]] = None, + sslpeer: Optional[Mapping[str, Any]] = None, + webhook: Optional[handlers.HandlerId] = None, + reason: Optional[handlers.WebhookType] = None, # TODO: undocumented: requires typing clarity! + # Injected by partial() from spawn_tasks(): + settings: configuration.OperatorSettings, + memories: containers.ResourceMemories, + memobase: ephemera.AnyMemo, + registry: registries.OperatorRegistry, + insights: references.Insights, + indices: ephemera.Indices, +) -> reviews.Response: + """ + The actual and the only implementation of the `WebhookFn` protocol. + + This function is passed to all webhook servers/tunnels to be called + whenever a new admission request is received. + + Some parameters are provided by the framework itself via partial binding, + so that the resulting function matches the `WebhookFn` protocol. Other + parameters are passed by the webhook servers when they call the function. + """ + + # Reconstruct the cause specially for web handlers. 
+ resource = find_resource(request=request, insights=insights) + operation = request.get('request', {}).get('operation') + userinfo = request.get('request', {}).get('userInfo') + new_body = request.get('request', {}).get('object') + old_body = request.get('request', {}).get('oldObject') + raw_body = new_body if new_body is not None else old_body + if userinfo is None: + raise MissingDataError("User info is missing from the admission request.") + if raw_body is None: + raise MissingDataError("Either old or new object is missing from the admission request.") + + memo = await memories.recall(raw_body, memo=memobase, ephemeral=operation=='CREATE') + body = bodies.Body(raw_body) + patch = patches.Patch() + warnings: List[str] = [] + cause = causation.ResourceWebhookCause( + resource=resource, + indices=indices, + logger=loggers.LocalObjectLogger(body=body, settings=settings), + patch=patch, + memo=memo, + body=body, + userinfo=userinfo, + warnings=warnings, + operation=operation, + dryrun=bool(request.get('request', {}).get('dryRun')), + sslpeer=sslpeer if sslpeer is not None else {}, # ensure a mapping even if not provided. + headers=headers if headers is not None else {}, # ensure a mapping even if not provided. + webhook=webhook, + reason=reason, + ) + + # Retrieve the handlers to be executed; maybe only one if the webhook server provides a hint. + handlers_ = registry._resource_webhooks.get_handlers(cause) + state = states.State.from_scratch().with_handlers(handlers_) + outcomes = await handling.execute_handlers_once( + lifecycle=lifecycles.all_at_once, + settings=settings, + handlers=handlers_, + cause=cause, + state=state, + default_errors=handlers.ErrorsMode.PERMANENT, + ) + + # Construct the response as per Kubernetes's conventions and expectations. 
+ response = build_response( + request=request, + outcomes=outcomes, + warnings=warnings, + jsonpatch=patch.as_json_patch(), + ) + return response + + +def find_resource( + *, + request: reviews.Request, + insights: references.Insights, +) -> references.Resource: + """ + Identify the requested resource by its meta-information (as discovered). + """ + # NB: Absent keys in the request are not acceptable, they must be provided. + request_payload: reviews.RequestPayload = request['request'] + request_resource: reviews.RequestResource = request_payload['resource'] + group = request_resource['group'] + version = request_resource['version'] + plural = request_resource['resource'] + selector = references.Selector(group=group, version=version, plural=plural) + resources = selector.select(insights.resources) + if not resources: + raise UnknownResourceError(f"The specified resource has no handlers: {request_resource}") + elif len(resources) > 1: + raise AmbiguousResourceError(f"The specified resource is ambiguous: {request_resource}") + else: + return list(resources)[0] + + +def build_response( + *, + request: reviews.Request, + outcomes: Mapping[handlers.HandlerId, states.HandlerOutcome], + warnings: Collection[str], + jsonpatch: patches.JSONPatch, +) -> reviews.Response: + """ + Construct the admission review response to a review request. 
+ """ + allowed = all(outcome.exception is None for id, outcome in outcomes.items()) + response = reviews.Response( + apiVersion=request.get('apiVersion', 'admission.k8s.io/v1'), + kind=request.get('kind', 'AdmissionReview'), + response=reviews.ResponsePayload( + uid=request.get('request', {}).get('uid', ''), + allowed=allowed)) + if warnings: + response['response']['warnings'] = [str(warning) for warning in warnings] + if jsonpatch: + encoded_patch: str = base64.b64encode(json.dumps(jsonpatch).encode('utf-8')).decode('ascii') + response['response']['patch'] = encoded_patch + response['response']['patchType'] = 'JSONPatch' + + # Prefer specialised admission errors to all other errors, Kopf's own errors to arbitrary ones. + errors = [outcome.exception for outcome in outcomes.values() if outcome.exception is not None] + errors.sort(key=lambda error: ( + 0 if isinstance(error, AdmissionError) else + 1 if isinstance(error, handling.PermanentError) else + 2 if isinstance(error, handling.TemporaryError) else + 9 + )) + if errors: + response['response']['status'] = reviews.ResponseStatus( + message=str(errors[0]) or repr(errors[0]), + code=(errors[0].code if isinstance(errors[0], AdmissionError) else None) or 500, + ) + return response + + +async def admission_webhook_server( + *, + settings: configuration.OperatorSettings, + registry: registries.OperatorRegistry, + insights: references.Insights, + webhookfn: reviews.WebhookFn, + container: primitives.Container[reviews.WebhookClientConfig], +) -> None: + + # Verify that the operator is configured properly (after the startup activities are done). + has_admission = bool(registry._resource_webhooks.get_all_handlers()) + if settings.admission.server is None and has_admission: + raise Exception( + "Admission handlers exist, but no admission server/tunnel is configured " + "in `settings.admission.server`. " + "More: https://kopf.readthedocs.io/en/stable/admission/") + + # Do not start the endpoints until resources are scanned. 
+ # Otherwise, we generate 404 "Not Found" for requests that arrive too early. + await insights.ready_resources.wait() + + # Communicate all the client configs the server yields: both the initial one and the updates. + # On each such change, the configuration manager will wake up and reconfigure the webhooks. + if settings.admission.server is not None: + async for client_config in settings.admission.server(webhookfn): + await container.set(client_config) + else: + await asyncio.Event().wait() + + +async def validating_configuration_manager( + *, + registry: registries.OperatorRegistry, + settings: configuration.OperatorSettings, + insights: references.Insights, + container: primitives.Container[reviews.WebhookClientConfig], +) -> None: + await configuration_manager( + reason=handlers.WebhookType.VALIDATING, + selector=references.VALIDATING_WEBHOOK, + registry=registry, settings=settings, + insights=insights, container=container, + ) + + +async def mutating_configuration_manager( + *, + registry: registries.OperatorRegistry, + settings: configuration.OperatorSettings, + insights: references.Insights, + container: primitives.Container[reviews.WebhookClientConfig], +) -> None: + await configuration_manager( + reason=handlers.WebhookType.MUTATING, + selector=references.MUTATING_WEBHOOK, + registry=registry, settings=settings, + insights=insights, container=container, + ) + + +async def configuration_manager( + *, + reason: handlers.WebhookType, + selector: references.Selector, + registry: registries.OperatorRegistry, + settings: configuration.OperatorSettings, + insights: references.Insights, + container: primitives.Container[reviews.WebhookClientConfig], +) -> None: + """ + Manage the webhook configurations dynamically. + + This is one of an operator's root tasks that run forever. + If exited, the whole operator exits as by an error. + + The manager waits for changes in one of these: + + * Observed resources in the cluster (via insights). 
+ * A new webhook client config yielded by the webhook server. + + On either of these occasions, the manager rebuilds the webhook configuration + and applies it to the specified configuration resources in the cluster + (for which it needs some RBAC permissions). + Besides, it also creates a webhook configuration resource if it is absent. + """ + + # Do nothing if not managed. The root task cannot be skipped from creation, + # since the managed mode is only set at the startup activities. + if settings.admission.managed is None: + await asyncio.Event().wait() + return + + # Wait until the prerequisites for managing are available (scanned from the cluster). + await insights.ready_resources.wait() + resource = await insights.backbone.wait_for(selector) + all_handlers = registry._resource_webhooks.get_all_handlers() + all_handlers = [h for h in all_handlers if h.reason == reason] + + # Optionally (if configured), pre-create the configuration objects if they are absent. + # Use the try-or-fail strategy instead of check-and-do -- to reduce the RBAC requirements. + try: + await creating.create_obj(resource=resource, name=settings.admission.managed) + except errors.APIConflictError: + pass # exists already + except errors.APIForbiddenError: + logger.error(f"Not enough RBAC permissions to create a {resource}.") + raise + + # Execute either when actually changed (yielded from the webhook server), + # or the condition is chain-notified (from the insights: on resources/namespaces revision). + # Ignore inconsistencies: they are expected -- the server fills the defaults. 
+ client_config: Optional[reviews.WebhookClientConfig] = None + try: + async for client_config in container.as_changed(): + logger.info(f"Reconfiguring the {reason.value} webhook {settings.admission.managed}.") + webhooks = build_webhooks( + all_handlers, + resources=insights.resources, + name_suffix=settings.admission.managed, + client_config=client_config) + await patching.patch_obj( + resource=resource, + namespace=None, + name=settings.admission.managed, + patch=patches.Patch({'webhooks': webhooks}), + ) + finally: + # Attempt to remove all managed webhooks, except for the strict ones. + if client_config is not None: + logger.info(f"Cleaning up the admission webhook {settings.admission.managed}.") + webhooks = build_webhooks( + all_handlers, + resources=insights.resources, + name_suffix=settings.admission.managed, + client_config=client_config, + persistent_only=True) + await patching.patch_obj( + resource=resource, + namespace=None, + name=settings.admission.managed, + patch=patches.Patch({'webhooks': webhooks}), + ) + + +def build_webhooks( + handlers_: Iterable[handlers.ResourceWebhookHandler], + *, + resources: Iterable[references.Resource], + name_suffix: str, + client_config: reviews.WebhookClientConfig, + persistent_only: bool = False, +) -> List[Dict[str, Any]]: + """ + Construct the content for ``[Validating|Mutating]WebhookConfiguration``. + + This function concentrates all conventions how Kopf manages the webhook. + """ + return [ + { + 'name': _normalize_name(handler.id, suffix=name_suffix), + 'sideEffects': 'NoneOnDryRun' if handler.side_effects else 'None', + 'failurePolicy': 'Ignore' if handler.ignore_failures else 'Fail', + 'matchPolicy': 'Equivalent', + 'rules': [ + { + 'apiGroups': [resource.group], + 'apiVersions': [resource.version], + 'resources': [resource.plural], + 'operations': ['*'] if handler.operation is None else [handler.operation], + 'scope': '*', # doesn't matter since a specific resource is used. 
+ } + for resource in resources + if handler.selector is not None # None is used only in sub-handlers, ignore here. + if handler.selector.check(resource) + ], + 'objectSelector': _build_labels_selector(handler.labels), + 'clientConfig': _inject_handler_id(client_config, handler.id), + 'timeoutSeconds': 30, # a permitted maximum is 30. + 'admissionReviewVersions': ['v1', 'v1beta1'], # only those understood by Kopf itself. + } + for handler in handlers_ + if not persistent_only or handler.persistent + ] + + +class MatchExpression(TypedDict, total=False): + key: str + operator: Literal['Exists', 'DoesNotExist', 'In', 'NotIn'] + values: Optional[Collection[str]] + + +def _build_labels_selector(labels: Optional[filters.MetaFilter]) -> Optional[Mapping[str, Any]]: + # https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#resources-that-support-set-based-requirements + exprs: Collection[MatchExpression] = [ + {'key': key, 'operator': 'Exists'} if val is filters.MetaFilterToken.PRESENT else + {'key': key, 'operator': 'DoesNotExist'} if val is filters.MetaFilterToken.ABSENT else + {'key': key, 'operator': 'In', 'values': [str(val)]} + for key, val in (labels or {}).items() + if not callable(val) + ] + return {'matchExpressions': exprs} if exprs else None + + +BAD_WEBHOOK_NAME = re.compile(r'[^\w\d\.-]') + + +def _normalize_name(id: handlers.HandlerId, suffix: str) -> str: + """ + Normalize the webhook name to what Kubernetes accepts as normal. + + The restriction is: *a lowercase RFC 1123 subdomain must consist + of lower case alphanumeric characters, \'-\' or \'.\', + and must start and end with an alphanumeric character.* + + The actual name is not that important, it is for informational purposes + only. In the managed configurations, it will be rewritten every time. 
+ """ + name = f'{id}'.replace('/', '.').replace('_', '-') # common cases, for beauty + name = BAD_WEBHOOK_NAME.sub(lambda s: s.group(0).encode('utf-8').hex(), name) # uncommon cases + return f'{name}.{suffix}' if suffix else name + + +def _inject_handler_id(config: reviews.WebhookClientConfig, id: handlers.HandlerId) -> reviews.WebhookClientConfig: + config = copy.deepcopy(config) + + url_id = urllib.parse.quote(id) + url = config.get('url') + if url is not None: + config['url'] = f'{url.rstrip("/")}/{url_id}' + + service = config.get('service') + if service is not None: + path = service.get('path', '') + service['path'] = f"{path}/{url_id}" + + return config diff --git a/kopf/reactor/causation.py b/kopf/reactor/causation.py index 3a70d18e..3ab8996b 100644 --- a/kopf/reactor/causation.py +++ b/kopf/reactor/causation.py @@ -21,11 +21,11 @@ """ import dataclasses import logging -from typing import Any, Optional, TypeVar, Union +from typing import Any, List, Mapping, Optional, TypeVar, Union from kopf.storage import finalizers -from kopf.structs import bodies, configuration, diffs, ephemera, \ - handlers, patches, primitives, references +from kopf.structs import bodies, configuration, diffs, ephemera, handlers, \ + patches, primitives, references, reviews @dataclasses.dataclass @@ -48,6 +48,18 @@ class ResourceCause(BaseCause): body: bodies.Body +@dataclasses.dataclass +class ResourceWebhookCause(ResourceCause): + dryrun: bool + reason: Optional[handlers.WebhookType] # None means "all" or expects the webhook id + webhook: Optional[handlers.HandlerId] # None means "all" + headers: Mapping[str, str] + sslpeer: Mapping[str, Any] + userinfo: reviews.UserInfo + warnings: List[str] # mutable! 
+ operation: Optional[reviews.Operation] # None if not provided for some reason + + @dataclasses.dataclass class ResourceIndexingCause(ResourceCause): """ diff --git a/kopf/reactor/invocation.py b/kopf/reactor/invocation.py index d07f1127..0cbec5b8 100644 --- a/kopf/reactor/invocation.py +++ b/kopf/reactor/invocation.py @@ -73,6 +73,14 @@ def build_kwargs( labels=cause.body.metadata.labels, annotations=cause.body.metadata.annotations, ) + if isinstance(cause, causation.ResourceWebhookCause): + new_kwargs.update( + dryrun=cause.dryrun, + headers=cause.headers, + sslpeer=cause.sslpeer, + userinfo=cause.userinfo, + warnings=cause.warnings, + ) if isinstance(cause, causation.ResourceWatchingCause): new_kwargs.update( event=cause.raw, diff --git a/kopf/reactor/observation.py b/kopf/reactor/observation.py index c20d8e61..1143f5fe 100644 --- a/kopf/reactor/observation.py +++ b/kopf/reactor/observation.py @@ -91,6 +91,7 @@ async def resource_observer( # Scan only the resource-related handlers, ignore activies & co. all_handlers: List[handlers.ResourceHandler] = [] + all_handlers.extend(registry._resource_webhooks.get_all_handlers()) all_handlers.extend(registry._resource_indexing.get_all_handlers()) all_handlers.extend(registry._resource_watching.get_all_handlers()) all_handlers.extend(registry._resource_spawning.get_all_handlers()) @@ -204,6 +205,7 @@ def revise_resources( # Scan only the resource-related handlers, ignore activies & co. 
all_handlers: List[handlers.ResourceHandler] = [] + all_handlers.extend(registry._resource_webhooks.get_all_handlers()) all_handlers.extend(registry._resource_indexing.get_all_handlers()) all_handlers.extend(registry._resource_watching.get_all_handlers()) all_handlers.extend(registry._resource_spawning.get_all_handlers()) diff --git a/kopf/reactor/registries.py b/kopf/reactor/registries.py index e2cc767f..bd93d204 100644 --- a/kopf/reactor/registries.py +++ b/kopf/reactor/registries.py @@ -31,7 +31,8 @@ callbacks.ResourceIndexingFn, callbacks.ResourceWatchingFn, callbacks.ResourceSpawningFn, - callbacks.ResourceChangingFn) + callbacks.ResourceChangingFn, + callbacks.ResourceWebhookFn) class GenericRegistry(Generic[HandlerFnT, HandlerT]): @@ -240,6 +241,32 @@ def get_resource_handlers( return list(_deduplicated(found_handlers)) +class ResourceWebhooksRegistry(ResourceRegistry[ + causation.ResourceWebhookCause, + callbacks.ResourceWebhookFn, + handlers.ResourceWebhookHandler]): + + def iter_handlers( + self, + cause: causation.ResourceWebhookCause, + excluded: Container[handlers.HandlerId] = frozenset(), + ) -> Iterator[handlers.ResourceWebhookHandler]: + for handler in self._handlers: + if handler.id not in excluded: + # Only the handlers for the hinted webhook, if possible; if not hinted, then all. + matching_reason = cause.reason is None or cause.reason == handler.reason + matching_webhook = cause.webhook is None or cause.webhook == handler.id + if matching_reason and matching_webhook: + # For deletion, exclude all mutation handlers unless explicitly enabled. + non_mutating = handler.reason != handlers.WebhookType.MUTATING + non_deletion = cause.operation != 'DELETE' + explicitly_for_deletion = handler.operation == 'DELETE' + if non_mutating or non_deletion or explicitly_for_deletion: + # Filter by usual criteria: labels, annotations, fields, callbacks. 
+ if match(handler=handler, cause=cause): + yield handler + + class OperatorRegistry: """ A global registry is used for handling of multiple resources & activities. @@ -254,6 +281,7 @@ def __init__(self) -> None: self._resource_watching = ResourceWatchingRegistry() self._resource_spawning = ResourceSpawningRegistry() self._resource_changing = ResourceChangingRegistry() + self._resource_webhooks = ResourceWebhooksRegistry() class SmartOperatorRegistry(OperatorRegistry): diff --git a/kopf/reactor/running.py b/kopf/reactor/running.py index 4c022903..03ae9bd9 100644 --- a/kopf/reactor/running.py +++ b/kopf/reactor/running.py @@ -8,10 +8,10 @@ from kopf.clients import auth from kopf.engines import peering, posting, probing -from kopf.reactor import activities, daemons, indexing, lifecycles, \ +from kopf.reactor import activities, admission, daemons, indexing, lifecycles, \ observation, orchestration, processing, registries -from kopf.structs import configuration, containers, credentials, \ - ephemera, handlers, primitives, references +from kopf.structs import configuration, containers, credentials, ephemera, \ + handlers, primitives, references, reviews from kopf.utilities import aiotasks logger = logging.getLogger(__name__) @@ -266,6 +266,30 @@ async def spawn_tasks( indices=indexers.indices, memo=memo))) + # Admission webhooks run as either a server or a tunnel or a fixed config. + # The webhook manager automatically adjusts the cluster configuration at runtime. 
+ container: primitives.Container[reviews.WebhookClientConfig] = primitives.Container() + tasks.append(aiotasks.create_guarded_task( + name="admission insights chain", flag=started_flag, logger=logger, + coro=primitives.condition_chain( + source=insights.revised, target=container.changed))) + tasks.append(aiotasks.create_guarded_task( + name="admission validating configuration manager", flag=started_flag, logger=logger, + coro=admission.validating_configuration_manager( + container=container, settings=settings, registry=registry, insights=insights))) + tasks.append(aiotasks.create_guarded_task( + name="admission mutating configuration manager", flag=started_flag, logger=logger, + coro=admission.mutating_configuration_manager( + container=container, settings=settings, registry=registry, insights=insights))) + tasks.append(aiotasks.create_guarded_task( + name="admission webhook server", flag=started_flag, logger=logger, + coro=admission.admission_webhook_server( + container=container, settings=settings, registry=registry, insights=insights, + webhookfn=functools.partial(admission.serve_admission_request, + settings=settings, registry=registry, insights=insights, + memories=memories, memobase=memo, + indices=indexers.indices)))) + # Permanent observation of what resource kinds and namespaces are available in the cluster. # Spawn and cancel dimensional tasks as they come and go; dimensions = resources x namespaces. 
tasks.append(aiotasks.create_guarded_task( diff --git a/kopf/structs/callbacks.py b/kopf/structs/callbacks.py index a0e19b22..d9065479 100644 --- a/kopf/structs/callbacks.py +++ b/kopf/structs/callbacks.py @@ -7,9 +7,10 @@ import datetime import logging from typing import TYPE_CHECKING, Any, Callable, Collection, \ - Coroutine, NewType, Optional, TypeVar, Union + Coroutine, List, NewType, Optional, TypeVar, Union -from kopf.structs import bodies, diffs, ephemera, patches, primitives, references +from kopf.structs import bodies, configuration, diffs, ephemera, \ + patches, primitives, references, reviews # A specialised type to highlight the purpose or origin of the data of type Any, # to not be mixed with other arbitrary Any values, where it is indeed "any". @@ -32,6 +33,7 @@ ResourceIndexingFn = Callable[..., _SyncOrAsyncResult] ResourceWatchingFn = Callable[..., _SyncOrAsyncResult] ResourceChangingFn = Callable[..., _SyncOrAsyncResult] + ResourceWebhookFn = Callable[..., None] ResourceDaemonFn = Callable[..., _SyncOrAsyncResult] ResourceTimerFn = Callable[..., _SyncOrAsyncResult] WhenFilterFn = Callable[..., bool] @@ -43,6 +45,7 @@ # when PEP 612 is released (https://www.python.org/dev/peps/pep-0612/) ActivityFn = Callable[ [ + NamedArg(configuration.OperatorSettings, "settings"), NamedArg(ephemera.Index, "*"), NamedArg(int, "retry"), NamedArg(datetime.datetime, "started"), @@ -127,6 +130,32 @@ _SyncOrAsyncResult ] + ResourceWebhookFn = Callable[ + [ + NamedArg(bool, "dryrun"), + NamedArg(List[str], "warnings"), # mutable! 
+ NamedArg(reviews.UserInfo, "userinfo"), + NamedArg(reviews.SSLPeer, "sslpeer"), + NamedArg(reviews.Headers, "headers"), + NamedArg(bodies.Labels, "labels"), + NamedArg(bodies.Annotations, "annotations"), + NamedArg(bodies.Body, "body"), + NamedArg(bodies.Meta, "meta"), + NamedArg(bodies.Spec, "spec"), + NamedArg(bodies.Status, "status"), + NamedArg(references.Resource, "resource"), + NamedArg(Optional[str], "uid"), + NamedArg(Optional[str], "name"), + NamedArg(Optional[str], "namespace"), + NamedArg(patches.Patch, "patch"), + NamedArg(LoggerType, "logger"), + NamedArg(ephemera.AnyMemo, "memo"), + DefaultNamedArg(Any, "param"), + KwArg(Any), + ], + None + ] + ResourceDaemonFn = Callable[ [ NamedArg(primitives.SyncAsyncDaemonStopperChecker, "stopped"), diff --git a/kopf/structs/configuration.py b/kopf/structs/configuration.py index 6329a6f7..2a9672e4 100644 --- a/kopf/structs/configuration.py +++ b/kopf/structs/configuration.py @@ -31,6 +31,7 @@ from typing import Iterable, Optional from kopf.storage import diffbase, progress +from kopf.structs import reviews @dataclasses.dataclass @@ -258,6 +259,47 @@ class ScanningSettings: """ +@dataclasses.dataclass +class AdmissionSettings: + + server: Optional[reviews.WebhookServerProtocol] = None + """ + A way of accepting admission requests from Kubernetes. + + In production, only a `kopf.WebhookServer` is sufficient. + If development, a tunnel from the cluster to the operator might be needed. + + If no server is configured (the default), then no server is started. + If admission handlers are detected with no server configured, + an error is raised and the operator fails to start (with a hint). + + Kopf provides several webhook configs, servers, and tunnels out of the box + (they also serve as examples for implementing custom tunnels). + `kopf.WebhookServer`, + `kopf.WebhookK3dServer`, `kopf.WebhookMinikubeServer`, + `kopf.WebhookNgrokTunnel`, `kopf.WebhookInletsTunnel`. + + .. seealso:: + :doc:`/admission`. 
+ """ + + managed: Optional[str] = None + """ + The names of managed ``[Validating/Mutating]WebhookConfiguration`` objects. + + If not set (the default), Kopf does not manage the configuration and + expects that the requests come from a manually pre-created configuration. + + If set, Kopf creates the validating/mutating configurations objects with + this name and continuously keeps them up to date with the currently served + resources and client configs as they change at runtime. + All existing webhooks in these configuration objects are overwritten. + + This feature requires the ``patch`` and ``create`` RBAC verbs + for ``admissionregistration.k8s.io``'s resources (:doc:`/admission`). + """ + + @dataclasses.dataclass class ExecutionSettings: """ @@ -385,6 +427,7 @@ class OperatorSettings: watching: WatchingSettings = dataclasses.field(default_factory=WatchingSettings) batching: BatchingSettings = dataclasses.field(default_factory=BatchingSettings) scanning: ScanningSettings = dataclasses.field(default_factory=ScanningSettings) + admission: AdmissionSettings =dataclasses.field(default_factory=AdmissionSettings) execution: ExecutionSettings = dataclasses.field(default_factory=ExecutionSettings) background: BackgroundSettings = dataclasses.field(default_factory=BackgroundSettings) persistence: PersistenceSettings = dataclasses.field(default_factory=PersistenceSettings) diff --git a/kopf/structs/containers.py b/kopf/structs/containers.py index f6fb91d6..08cedf01 100644 --- a/kopf/structs/containers.py +++ b/kopf/structs/containers.py @@ -92,20 +92,30 @@ async def recall( *, memo: Optional[ephemera.AnyMemo] = None, noticed_by_listing: bool = False, + ephemeral: bool = False, ) -> ResourceMemory: """ Either find a resource's memory, or create and remember a new one. Keep the last-seen body up to date for all the handlers. + + Ephemeral memos are not remembered now + (later: will be remembered for short time, and then garbage-collected). 
+ They are used by admission webhooks before the resource is created -- + to not waste RAM for what might never exist. The persistent memo + will be created *after* the resource creation really happens. """ key = self._build_key(raw_body) - if key not in self._items: + if key in self._items: + memory = self._items[key] + else: if memo is None: memory = ResourceMemory(noticed_by_listing=noticed_by_listing) else: memory = ResourceMemory(noticed_by_listing=noticed_by_listing, memo=copy.copy(memo)) - self._items[key] = memory - return self._items[key] + if not ephemeral: + self._items[key] = memory + return memory async def forget( self, diff --git a/kopf/structs/handlers.py b/kopf/structs/handlers.py index 8527ce9d..863274b3 100644 --- a/kopf/structs/handlers.py +++ b/kopf/structs/handlers.py @@ -23,6 +23,14 @@ class Activity(str, enum.Enum): PROBE = 'probe' +class WebhookType(str, enum.Enum): + VALIDATING = 'validating' + MUTATING = 'mutating' + + def __str__(self) -> str: + return str(self.value) + + # Constants for cause types, to prevent a direct usage of strings, and typos. # They are not exposed by the framework, but are used internally. See also: `kopf.on`. 
class Reason(str, enum.Enum): @@ -105,6 +113,23 @@ def requires_patching(self) -> bool: return True # all typical handlers except several ones with overrides +@dataclasses.dataclass +class ResourceWebhookHandler(ResourceHandler): + fn: callbacks.ResourceWebhookFn # type clarification + reason: WebhookType + operation: Optional[str] + persistent: Optional[bool] + side_effects: Optional[bool] + ignore_failures: Optional[bool] + + def __str__(self) -> str: + return f"Webhook {self.id!r}" + + @property + def requires_patching(self) -> bool: + return False + + @dataclasses.dataclass class ResourceIndexingHandler(ResourceHandler): fn: callbacks.ResourceIndexingFn # type clarification diff --git a/kopf/structs/patches.py b/kopf/structs/patches.py index d888037d..8c398f2e 100644 --- a/kopf/structs/patches.py +++ b/kopf/structs/patches.py @@ -8,10 +8,24 @@ a dict-like behaviour, and remembers the changes in order of their execution, and then generates the JSON patch (RFC 6902). """ -from typing import Any, Dict, MutableMapping, Optional +import collections.abc +from typing import Any, Dict, List, MutableMapping, Optional + +from typing_extensions import Literal, TypedDict from kopf.structs import dicts +JSONPatchOp = Literal["add", "replace", "remove"] + + +class JSONPatchItem(TypedDict, total=False): + op: JSONPatchOp + path: str + value: Optional[Any] + + +JSONPatch = List[JSONPatchItem] + class MetaPatch(dicts.MutableMappingView[str, Any]): _labels: dicts.MutableMappingView[str, Optional[str]] @@ -65,3 +79,18 @@ def spec(self) -> SpecPatch: @property def status(self) -> StatusPatch: return self._status + + def as_json_patch(self) -> JSONPatch: + return [] if not self else self._as_json_patch(self, keys=['']) + + def _as_json_patch(self, value: object, keys: List[str]) -> JSONPatch: + result: JSONPatch = [] + if value is None: + result.append(JSONPatchItem(op='remove', path='/'.join(keys))) + elif isinstance(value, collections.abc.Mapping) and value: + for key, val in 
value.items(): + result.extend(self._as_json_patch(val, keys + [key])) + else: + # TODO: need to distinguish 'add' vs 'replace' -- need to know the original value. + result.append(JSONPatchItem(op='replace', path='/'.join(keys), value=value)) + return result diff --git a/kopf/structs/primitives.py b/kopf/structs/primitives.py index a809dd2e..dee7dc9e 100644 --- a/kopf/structs/primitives.py +++ b/kopf/structs/primitives.py @@ -7,7 +7,8 @@ import enum import threading import time -from typing import Any, Callable, Collection, Iterable, Iterator, Optional, Set, Union +from typing import Any, AsyncIterator, Callable, Collection, Generic, \ + Iterable, Iterator, Optional, Set, TypeVar, Union from kopf.utilities import aiotasks @@ -82,6 +83,68 @@ def check_flag( raise TypeError(f"Unsupported type of a flag: {flag!r}") +async def condition_chain( + source: asyncio.Condition, + target: asyncio.Condition, +) -> None: + """ + A condition chain is a "clean" hack to attach one condition to another. + + It is a "clean" (not "dirty") hack to wake up the webhook configuration + managers when either the resources are revised (as seen in the insights), + or a new client config is yielded from the webhook server. 
+ """ + async with source: + while True: + await source.wait() + async with target: + target.notify_all() + + +_T = TypeVar('_T') + + +class Container(Generic[_T]): + + def __init__(self) -> None: + super().__init__() + self.changed = asyncio.Condition() + self._values: Collection[_T] = [] # 0..1 item + + def get_nowait(self) -> _T: # used mostly in testing + try: + return next(iter(self._values)) + except StopIteration: + raise LookupError("No value is stored in the container.") from None + + async def set(self, value: _T) -> None: + async with self.changed: + self._values = [value] + self.changed.notify_all() + + async def wait(self) -> _T: + async with self.changed: + await self.changed.wait_for(lambda: self._values) + try: + return next(iter(self._values)) + except StopIteration: # impossible because of the condition's predicate + raise LookupError("No value is stored in the container.") from None + + async def reset(self) -> None: + async with self.changed: + self._values = [] + self.changed.notify_all() + + async def as_changed(self) -> AsyncIterator[_T]: + async with self.changed: + while True: + try: + yield next(iter(self._values)) + except StopIteration: + pass + await self.changed.wait() + + # Mind the value: it can be bool-evaluatable but non-bool -- always convert it. 
class Toggle: """ diff --git a/kopf/structs/references.py b/kopf/structs/references.py index 62530027..bbf3c746 100644 --- a/kopf/structs/references.py +++ b/kopf/structs/references.py @@ -397,6 +397,8 @@ def select(self, resources: Collection[Resource]) -> Collection[Resource]: NAMESPACES = Selector('v1', 'namespaces') CLUSTER_PEERINGS = Selector('clusterkopfpeerings') NAMESPACED_PEERINGS = Selector('kopfpeerings') +MUTATING_WEBHOOK = Selector('admissionregistration.k8s.io', 'mutatingwebhookconfigurations') +VALIDATING_WEBHOOK = Selector('admissionregistration.k8s.io', 'validatingwebhookconfigurations') class Backbone(Mapping[Selector, Resource]): @@ -422,7 +424,11 @@ def __init__(self) -> None: super().__init__() self._items: MutableMapping[Selector, Resource] = {} self._revised = asyncio.Condition() - self.selectors = [NAMESPACES, EVENTS, CRDS, CLUSTER_PEERINGS, NAMESPACED_PEERINGS] + self.selectors = [ + NAMESPACES, EVENTS, CRDS, + CLUSTER_PEERINGS, NAMESPACED_PEERINGS, + MUTATING_WEBHOOK, VALIDATING_WEBHOOK, + ] def __len__(self) -> int: return len(self._items) diff --git a/kopf/structs/reviews.py b/kopf/structs/reviews.py new file mode 100644 index 00000000..2f3c78a7 --- /dev/null +++ b/kopf/structs/reviews.py @@ -0,0 +1,134 @@ +""" +Admission reviews: requests & responses, also the webhook server protocols. 
+""" +from typing import Any, AsyncIterator, Awaitable, Callable, List, Mapping, Optional, Union + +from typing_extensions import Literal, Protocol, TypedDict + +from kopf.structs import bodies + +Headers = Mapping[str, str] +SSLPeer = Mapping[str, Any] + +Operation = Literal['CREATE', 'UPDATE', 'DELETE', 'CONNECT'] + + +class RequestKind(TypedDict): + group: str + version: str + kind: str + + +class RequestResource(TypedDict): + group: str + version: str + resource: str + + +class UserInfo(TypedDict): + username: str + uid: str + groups: List[str] + + +class CreateOptions(TypedDict, total=False): + apiVersion: Literal["meta.k8s.io/v1"] + kind: Literal["CreateOptions"] + + +class UpdateOptions(TypedDict, total=False): + apiVersion: Literal["meta.k8s.io/v1"] + kind: Literal["UpdateOptions"] + + +class DeleteOptions(TypedDict, total=False): + apiVersion: Literal["meta.k8s.io/v1"] + kind: Literal["DeleteOptions"] + + +class RequestPayload(TypedDict): + uid: str + kind: RequestKind + resource: RequestResource + requestKind: RequestKind + requestResource: RequestResource + userInfo: UserInfo + name: str + namespace: Optional[str] + operation: Operation + options: Union[None, CreateOptions, UpdateOptions, DeleteOptions] + dryRun: bool + object: bodies.RawBody + oldObject: Optional[bodies.RawBody] + + +class Request(TypedDict): + apiVersion: Literal["admission.k8s.io/v1", "admission.k8s.io/v1beta1"] + kind: Literal["AdmissionReview"] + request: RequestPayload + + +class ResponseStatus(TypedDict, total=False): + code: int + message: str + + +class ResponsePayload(TypedDict, total=False): + uid: str + allowed: bool + warnings: Optional[List[str]] + status: Optional[ResponseStatus] + patch: Optional[str] + patchType: Optional[Literal["JSONPatch"]] + + +class Response(TypedDict): + apiVersion: Literal["admission.k8s.io/v1", "admission.k8s.io/v1beta1"] + kind: Literal["AdmissionReview"] + response: ResponsePayload + + +class WebhookClientConfigService(TypedDict, total=False): 
+ namespace: Optional[str] + name: Optional[str] + path: Optional[str] + port: Optional[int] + + +class WebhookClientConfig(TypedDict, total=False): + """ + A config of clients (apiservers) to access the webhooks' server (operators). + + This dictionary is put into managed webhook configurations "as is". + The fields & type annotations are only for hinting. + + Kopf additionally modifies the url and the service's path to inject + handler ids as the last path component. This must be taken into account + by custom webhook servers. + """ + caBundle: Optional[str]  # if absent, the default apiservers' trust chain is used. + url: Optional[str] + service: Optional[WebhookClientConfigService] + + +class WebhookFn(Protocol): + """ + A framework-provided function to call when an admission request is received. + + The framework provides the actual function. Custom webhook servers must + accept the function, invoke it accordingly on admission requests, wait + for the admission response, serialise it and send it back. They do not + implement this function. This protocol only declares the exact signature. + """ + def __call__( + self, + request: Request, + *, + webhook: Optional[str] = None, + headers: Optional[Mapping[str, str]] = None, + sslpeer: Optional[Mapping[str, Any]] = None, + ) -> Awaitable[Response]: ... + + +# A server (either a coroutine or a callable object). +WebhookServerProtocol = Callable[[WebhookFn], AsyncIterator[WebhookClientConfig]] diff --git a/kopf/toolkits/webhooks.py new file mode 100644 index 00000000..fb59dff9 --- /dev/null +++ b/kopf/toolkits/webhooks.py @@ -0,0 +1,537 @@ +""" +Several webhook servers & tunnels supported out of the box.
+""" +import asyncio +import base64 +import contextlib +import functools +import ipaddress +import json +import logging +import os +import pathlib +import socket +import ssl +import tempfile +import urllib.parse +from typing import TYPE_CHECKING, AsyncIterator, Collection, Dict, Iterable, Optional, Tuple, Union + +import aiohttp.web + +from kopf.reactor import admission +from kopf.structs import reviews + +logger = logging.getLogger(__name__) + +if TYPE_CHECKING: + StrPath = Union[str, os.PathLike[str]] +else: + StrPath = Union[str, os.PathLike] + + +class MissingDependencyError(ImportError): + """ A server/tunnel is used which requires an optional dependency. """ + + +class WebhookServer: + """ + A local HTTP/HTTPS endpoint. + + Currently, the server is based on ``aiohttp``, but the implementation + can change in the future without warning. + + This server is also used by specialised tunnels when they need + a local endpoint to be tunneled. + + * ``addr``, ``port`` is where to listen for connections + (defaults to ``localhost`` and ``9443``). + * ``path`` is the root path for a webhook server + (defaults to no root path). + * ``host`` is an optional override of the hostname for webhook URLs; + if not specified, the ``addr`` will be used. + + Kubernetes requires HTTPS, so HTTPS is the default mode of the server. + This webhook server supports SSL both for the server certificates + and for client certificates (e.g., for authentication) at the same time: + + * ``cadata``, ``cafile`` is the CA bundle to be passed as a "client config" + to the webhook configuration objects, to be used by clients/apiservers + when talking to the webhook server; it is not used in the server itself. + * ``cadump`` is a path to save the resulting CA bundle to be used + by clients, i.e. apiservers; it can be passed to ``curl --cacert ...``; + if ``cafile`` is provided, it contains the same content. 
+ * ``certfile``, ``pkeyfile`` define the server's endpoint certificate; + if not specified, a self-signed certificate and CA will be generated + for both ``addr`` & ``host`` as SANs (but only ``host`` for CommonName). + * ``password`` is either for decrypting the provided ``pkeyfile``, + or for encrypting and decrypting the generated private key. + * ``extra_sans`` are put into the self-signed certificate as SANs (DNS/IP) + in addition to the host & addr (in case some other endpoints exist). + * ``verify_mode``, ``verify_cafile``, ``verify_capath``, ``verify_cadata`` + will be loaded into the SSL context for verifying the client certificates + when provided and if provided by the clients, i.e. apiservers or curl; + (`ssl.SSLContext.verify_mode`, `ssl.SSLContext.load_verify_locations`). + * ``insecure`` flag disables HTTPS and runs an HTTP webhook server. + This is used in ngrok for a local endpoint, but can be used for debugging + or when the certificate-generating dependencies/extras are not installed. 
+ """ + DEFAULT_HOST: Optional[str] = None + + addr: Optional[str] # None means "all interfaces" + port: Optional[int] # None means random port + host: Optional[str] + path: Optional[str] + + cadata: Optional[bytes] # -> .webhooks.*.clientConfig.caBundle + cafile: Optional[StrPath] + cadump: Optional[StrPath] + + context: Optional[ssl.SSLContext] + insecure: bool + certfile: Optional[StrPath] + pkeyfile: Optional[StrPath] + password: Optional[str] + + extra_sans: Iterable[str] + + verify_mode: Optional[ssl.VerifyMode] + verify_cafile: Optional[StrPath] + verify_capath: Optional[StrPath] + verify_cadata: Optional[Union[str, bytes]] + + def __init__( + self, + *, + # Listening socket, root URL path, and the reported URL hostname: + addr: Optional[str] = None, + port: Optional[int] = None, + path: Optional[str] = None, + host: Optional[str] = None, + # The CA bundle to be passed to "client configs": + cadata: Optional[bytes] = None, + cafile: Optional[StrPath] = None, + cadump: Optional[StrPath] = None, + # A pre-configured SSL context (if any): + context: Optional[ssl.SSLContext] = None, + # The server's own certificate, or lack of it (loaded into the context): + insecure: bool = False, # http is needed for ngrok + certfile: Optional[StrPath] = None, + pkeyfile: Optional[StrPath] = None, + password: Optional[str] = None, + # Generated certificate's extra info. 
+ extra_sans: Iterable[str] = (), + # Verification of client certificates (loaded into the context): + verify_mode: Optional[ssl.VerifyMode] = None, + verify_cafile: Optional[StrPath] = None, + verify_capath: Optional[StrPath] = None, + verify_cadata: Optional[Union[str, bytes]] = None, + ) -> None: + super().__init__() + self.addr = addr + self.port = port + self.path = path + self.host = host + self.cadata = cadata + self.cafile = cafile + self.cadump = cadump + self.context = context + self.insecure = insecure + self.certfile = certfile + self.pkeyfile = pkeyfile + self.password = password + self.extra_sans = extra_sans + self.verify_mode = verify_mode + self.verify_cafile = verify_cafile + self.verify_capath = verify_capath + self.verify_cadata = verify_cadata + + async def __call__(self, fn: reviews.WebhookFn) -> AsyncIterator[reviews.WebhookClientConfig]: + + # Redefine as a coroutine instead of a partial to avoid warnings from aiohttp. + async def _serve_fn(request: aiohttp.web.Request) -> aiohttp.web.Response: + return await self._serve(fn, request) + + cadata, context = self._build_ssl() + path = self.path.rstrip('/') if self.path else '' + app = aiohttp.web.Application() + app.add_routes([aiohttp.web.post(f"{path}/{{id:.*}}", _serve_fn)]) + runner = aiohttp.web.AppRunner(app, handle_signals=False) + await runner.setup() + try: + addr = self.addr or None # None is aiohttp's "any interface" + port = self.port or self._allocate_free_port() + site = aiohttp.web.TCPSite(runner, addr, port, ssl_context=context) + await site.start() + + # Log with the actual URL: normalised, with hostname/port set. 
+ schema = 'http' if context is None else 'https' + url = self._build_url(schema, addr or '*', port, self.path or '') + logger.debug(f"Listening for webhooks at {url}") + host = self.host or self.DEFAULT_HOST or self._get_accessible_addr(self.addr) + url = self._build_url(schema, host, port, self.path or '') + logger.debug(f"Accessing the webhooks at {url}") + + client_config = reviews.WebhookClientConfig(url=url) + if cadata is not None: + client_config['caBundle'] = base64.b64encode(cadata).decode('ascii') + + yield client_config + await asyncio.Event().wait() + finally: + # On any reason of exit, stop serving the endpoint. + await runner.cleanup() + + @staticmethod + async def _serve( + fn: reviews.WebhookFn, + request: aiohttp.web.Request, + ) -> aiohttp.web.Response: + """ + Serve a single admission request: an aiohttp-specific implementation. + + Mind 2 different ways the errors are reported: + + * Directly by the webhook's response, i.e. to the apiservers. + This means that the webhook request was done improperly; + the original API request might be good, but we could not confirm that. + * In ``.response.status``, as apiservers send it to the requesting user. + This means that the original API operation was done improperly, + while the webhooks are functional. + """ + # The extra information that is passed down to handlers for authentication/authorization. + # Note: this is an identity of an apiserver, not of the user that sends an API request. 
+ headers = dict(request.headers) + sslpeer = request.transport.get_extra_info('peercert') if request.transport else None + webhook = request.match_info.get('id') + try: + text = await request.text() + data = json.loads(text) + response = await fn(data, webhook=webhook, sslpeer=sslpeer, headers=headers) + return aiohttp.web.json_response(response) + except admission.AmbiguousResourceError as e: + raise aiohttp.web.HTTPConflict(reason=str(e)) + except admission.UnknownResourceError as e: + raise aiohttp.web.HTTPNotFound(reason=str(e)) + except admission.WebhookError as e: + raise aiohttp.web.HTTPBadRequest(reason=str(e)) + except json.JSONDecodeError as e: + raise aiohttp.web.HTTPBadRequest(reason=str(e)) + + @staticmethod + def _allocate_free_port() -> int: + with contextlib.closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s: + s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) + s.bind(('', 0)) # '' is a special IPv4 form for "any interface" + return int(s.getsockname()[1]) + + @staticmethod + def _get_accessible_addr(addr: Optional[str]) -> str: + """ + Convert a "catch-all" listening address to the accessible hostname. + + "Catch-all" interfaces like `0.0.0.0` or `::/0` can be used + for listening to utilise all interfaces, but cannot be accessed. + Some other real ("specified") address must be used for that. + + If the address is not IPv4/IPv6 address or is a regular "specified" + address, it is used as is. Only the special addressed are overridden. 
+ """ + if addr is None: + return 'localhost' # and let the system resolved it to IPv4/IPv6 + try: + ipv4 = ipaddress.IPv4Address(addr) + except ipaddress.AddressValueError: + pass + else: + return '127.0.0.1' if ipv4.is_unspecified else addr + try: + ipv6 = ipaddress.IPv6Address(addr) + except ipaddress.AddressValueError: + pass + else: + return '::1' if ipv6.is_unspecified else addr + return addr + + @staticmethod + def _build_url(schema: str, host: str, port: int, path: str) -> str: + try: + ipv6 = ipaddress.IPv6Address(host) + except ipaddress.AddressValueError: + pass + else: + host = f'[{ipv6}]' + is_default_port = ((schema == 'http' and port == 80) or + (schema == 'https' and port == 443)) + netloc = host if is_default_port else f'{host}:{port}' + return urllib.parse.urlunsplit([schema, netloc, path, '', '']) + + def _build_ssl(self) -> Tuple[Optional[bytes], Optional[ssl.SSLContext]]: + """ + A macros to construct an SSL context, possibly generating SSL certs. + + Returns a CA bundle to be passed to the "client configs", + and a properly initialised SSL context to be used by the server. + Or ``None`` for both if an HTTP server is needed. + """ + cadata = self.cadata + context = self.context + if self.insecure and self.context is not None: + raise ValueError("Insecure mode cannot have an SSL context specified.") + + # Read the provided CA bundle for webhooks' "client config"; not used by the server itself. + if cadata is None and self.cafile is not None: + cadata = pathlib.Path(self.cafile).read_bytes() + + # Kubernetes does not work with HTTP, so we do not bother and always run HTTPS too. + # Except when explicitly said to be insecure, e.g. by ngrok (free plan only supports HTTP). + if context is None and not self.insecure: + context = ssl.create_default_context(purpose=ssl.Purpose.CLIENT_AUTH) + + if context is not None: + + # Load a CA for verifying the client certificates (if provided) by this server. 
+ if self.verify_mode is not None: + context.verify_mode = self.verify_mode + if self.verify_cafile or self.verify_capath or self.verify_cadata: + logger.debug("Loading a CA for client certificate verification.") + context.load_verify_locations( + self.verify_cafile, + self.verify_capath, + self.verify_cadata, + ) + if context.verify_mode == ssl.CERT_NONE: + context.verify_mode = ssl.CERT_OPTIONAL + + # Load the specified server's certificate, or generate a self-signed one if possible. + # If cafile/cadata are not defined, use the server's certificate as a CA for clients. + if self.certfile is not None and self.pkeyfile is not None: + logger.debug("Using a provided certificate for HTTPS.") + context.load_cert_chain( + self.certfile, + self.pkeyfile, + self.password, + ) + if cadata is None and self.certfile is not None: + cadata = pathlib.Path(self.certfile).read_bytes() + else: + logger.debug("Generating a self-signed certificate for HTTPS.") + host = self.host or self.DEFAULT_HOST + addr = self._get_accessible_addr(self.addr) + hostnames = [host or addr, addr] + list(self.extra_sans) + certdata, pkeydata = self.build_certificate(hostnames, self.password) + with tempfile.NamedTemporaryFile() as certf, tempfile.NamedTemporaryFile() as pkeyf: + certf.write(certdata) + pkeyf.write(pkeydata) + certf.flush() + pkeyf.flush() + context.load_cert_chain(certf.name, pkeyf.name, self.password) + + # For a self-signed certificate, the CA bundle is the certificate itself, + # regardless of what cafile/cadata are provided from outside. + cadata = certdata + + # Dump the provided or self-signed CA (but not the key!), e.g. 
for `curl --cacert ...` + if self.cadump is not None and cadata is not None: + pathlib.Path(self.cadump).write_bytes(cadata) + + return cadata, context + + @staticmethod + def build_certificate( + hostnames: Collection[str], + password: Optional[str] = None, + ) -> Tuple[bytes, bytes]: + """ + Build a self-signed certificate with SANs (subject alternative names). + + Returns a tuple of the certificate and its private key (PEM-formatted). + + The certificate is "minimally sufficient", without much of the extra + information on the subject besides its common and alternative names. + However, IP addresses are properly recognised and normalised for better + compatibility with strict SSL clients (like apiservers of Kubernetes). + The first non-IP hostname becomes the certificate's common name -- + by convention, non-configurable. If no hostnames are found, the first + IP address is used as a fallback. Magic IPs like 0.0.0.0 are excluded. + + ``certbuilder`` is used as an implementation because it is lightweight: + 2.9 MB vs. 8.7 MB for cryptography. Still, it is too heavy to include + as a normal runtime dependency (for 8.8 MB of Kopf itself), so it is + only available as the ``kopf[dev]`` extra for development-mode dependencies. + This can change in the future if self-signed certificates become used + at runtime (e.g. in production/staging environments or other real clusters). + """ + try: + import certbuilder + import oscrypto.asymmetric + except ImportError: + raise MissingDependencyError( + "Using self-signed certificates requires an extra dependency: " + "run `pip install certbuilder` or `pip install kopf[dev]`. " + "Or pass `insecure=True` to a webhook server to use only HTTP. " + "Or generate your own certificates and pass as certfile=/pkeyfile=. " + "More: https://kopf.readthedocs.io/en/stable/admission/") + + # Detect which ones of the hostnames are probably IPv4/IPv6 addresses. + # A side-effect: bring them all to their canonical forms. 
+ parsed_ips: Dict[str, Union[ipaddress.IPv4Address, ipaddress.IPv6Address]] = {} + for hostname in hostnames: + try: + parsed_ips[hostname] = ipaddress.IPv4Address(hostname) + except ipaddress.AddressValueError: + pass + try: + parsed_ips[hostname] = ipaddress.IPv6Address(hostname) + except ipaddress.AddressValueError: + pass + + # Later, only the normalised IPs are used as SANs, not the raw IPs. + # Remove bindable but non-accessible addresses (like 0.0.0.0) from the SANs. + true_hostnames = [hostname for hostname in hostnames if hostname not in parsed_ips] + accessible_ips = [str(ip) for ip in parsed_ips.values() if not ip.is_unspecified] + + # Build a certificate as the framework believes is good enough for itself. + subject = {'common_name': true_hostnames[0] if true_hostnames else accessible_ips[0]} + public_key, private_key = oscrypto.asymmetric.generate_pair('rsa', bit_size=2048) + builder = certbuilder.CertificateBuilder(subject, public_key) + builder.ca = True + builder.key_usage = {'digital_signature', 'key_encipherment', 'key_cert_sign', 'crl_sign'} + builder.extended_key_usage = {'server_auth', 'client_auth'} + builder.self_signed = True + builder.subject_alt_ips = list(set(accessible_ips)) # deduplicate + builder.subject_alt_domains = list(set(true_hostnames) | set(accessible_ips)) # deduplicate + certificate = builder.build(private_key) + cert_pem: bytes = certbuilder.pem_armor_certificate(certificate) + pkey_pem: bytes = oscrypto.asymmetric.dump_private_key(private_key, password, target_ms=10) + return cert_pem, pkey_pem + + +class WebhookK3dServer(WebhookServer): + """ + A tunnel from inside of K3d/K3s to its host where the operator is running. + + With this tunnel, a developer can develop the webhooks when fully offline, + since all the traffic is local and never leaves the host machine. + + The forwarding is maintained by K3d itself. 
This tunnel only replaces + the endpoints for the Kubernetes webhook and injects an SSL certificate + with proper CN/SANs --- to match Kubernetes's SSL validity expectations. + """ + DEFAULT_HOST = 'host.k3d.internal' + + +class WebhookMinikubeServer(WebhookServer): + """ + A tunnel from inside of Minikube to its host where the operator is running. + + With this tunnel, a developer can develop the webhooks when fully offline, + since all the traffic is local and never leaves the host machine. + + The forwarding is maintained by Minikube itself. This tunnel only replaces + the endpoints for the Kubernetes webhook and injects an SSL certificate + with proper CN/SANs --- to match Kubernetes's SSL validity expectations. + """ + DEFAULT_HOST = 'host.minikube.internal' + + +class WebhookNgrokTunnel: + """ + Tunnel admission webhook request via an external tunnel: ngrok_. + + .. _ngrok: https://ngrok.com/ + + ``addr``, ``port``, and ``path`` have the same meaning as in + `kopf.WebhookServer`: where to listen for connections locally. + Ngrok then tunnels this endpoint remotely. + + Mind that the ngrok webhook tunnel runs the local webhook server + in an insecure (HTTP) mode. For secure (HTTPS) mode, a paid subscription + and properly issued certificates are needed. This goes beyond Kopf's scope. + If needed, implement your own ngrok tunnel. + + Besides, ngrok tunnel does not report any CA to the webhook client configs. + It is expected that the default trust chain is sufficient for ngrok's certs. + + ``token`` can be used for paid subscriptions, which lifts some limitations. + Otherwise, the free plan has a limit of 40 requests per minute + (this should be enough for local development). + + ``binary``, if set, will use the specified ``ngrok`` binary path; + otherwise, ``pyngrok`` downloads the binary at runtime (not recommended). + + .. warning:: + + The public URL is not properly protected and a malicious user + can send requests to a locally running operator. 
If the handlers + only process the data and make no side effects, this should be fine. + + Although ngrok provides basic auth ("username:password"), + Kubernetes does not permit this information in the URLs. + + Ngrok partially "protects" the URLs by assigning them random hostnames. + Additionally, you can add random paths. However, this is not "security", + only a bit of safety for a short time (enough for development runs). + """ + addr: Optional[str] # None means "any interface" + port: Optional[int] # None means a random port + path: Optional[str] + token: Optional[str] + region: Optional[str] + binary: Optional[StrPath] + + def __init__( + self, + *, + addr: Optional[str] = None, + port: Optional[int] = None, + path: Optional[str] = None, + token: Optional[str] = None, + region: Optional[str] = None, + binary: Optional[StrPath] = None, + ) -> None: + super().__init__() + self.addr = addr + self.port = port + self.path = path + self.token = token + self.region = region + self.binary = binary + + async def __call__(self, fn: reviews.WebhookFn) -> AsyncIterator[reviews.WebhookClientConfig]: + try: + from pyngrok import conf, ngrok + except ImportError: + raise MissingDependencyError( + "Using ngrok webhook tunnel requires an extra dependency: " + "run `pip install pyngrok` or `pip install kopf[dev]`. " + "More: https://kopf.readthedocs.io/en/stable/admission/") + + if self.binary is not None: + conf.get_default().ngrok_path = str(self.binary) + if self.region is not None: + conf.get_default().region = self.region + if self.token is not None: + ngrok.set_auth_token(self.token) + + # Ngrok only supports HTTP with a free plan; HTTPS requires a paid subscription. + local_server = WebhookServer(addr=self.addr, port=self.port, path=self.path, insecure=True) + tunnel: Optional[ngrok.NgrokTunnel] = None + loop = asyncio.get_running_loop() + try: + async for client_config in local_server(fn): + + # Re-create the tunnel for each new local endpoint (if it did change at all). 
+ if tunnel is not None: + await loop.run_in_executor(None, ngrok.disconnect, tunnel.public_url) + parsed = urllib.parse.urlparse(client_config['url']) + tunnel = await loop.run_in_executor( + None, functools.partial(ngrok.connect, f'{parsed.port}', bind_tls=True)) + + # Adjust for local webhook server specifics (no port, but with the same path). + # Report no CA bundle -- ngrok's certs (Let's Encrypt) are in a default trust chain. + url = f"{tunnel.public_url}{self.path or ''}" + logger.debug(f"Accessing the webhooks at {url}") + yield reviews.WebhookClientConfig(url=url) # e.g. 'https://e5fc05f6494b.ngrok.io/xyz' + finally: + if tunnel is not None: + await loop.run_in_executor(None, ngrok.disconnect, tunnel.public_url) diff --git a/requirements.txt b/requirements.txt index 49163553..3f3a64cb 100644 --- a/requirements.txt +++ b/requirements.txt @@ -16,3 +16,6 @@ codecov coveralls mypy==0.800 isort>=5.5.0 +certvalidator +certbuilder +pyngrok diff --git a/setup.py b/setup.py index 5951e73d..15645806 100644 --- a/setup.py +++ b/setup.py @@ -42,15 +42,23 @@ 'setuptools_scm', ], install_requires=[ - 'typing_extensions', - 'python-json-logger', - 'click', - 'iso8601', - 'aiohttp<4.0.0', - 'aiojobs', + 'typing_extensions', # 0.20 MB + 'python-json-logger', # 0.05 MB + 'iso8601', # 0.07 MB + 'click', # 0.60 MB + 'aiojobs', # 0.07 MB + 'aiohttp<4.0.0', # 7.80 MB ], extras_require={ - 'full-auth': ['pykube-ng', 'kubernetes'], + 'full-auth': [ + 'pykube-ng', # 4.90 MB + 'kubernetes', # 40.0 MB (!) 
+ ], + 'dev': [ + 'pyngrok', # 1.00 MB + downloaded binary + 'oscrypto', # 2.80 MB (smaller than cryptography: 8.7 MB) + 'certbuilder', # +0.1 MB (2.90 MB if alone) + ], }, package_data={"kopf": ["py.typed"]}, ) diff --git a/tests/admission/conftest.py b/tests/admission/conftest.py new file mode 100644 index 00000000..8126df4b --- /dev/null +++ b/tests/admission/conftest.py @@ -0,0 +1,141 @@ +import asyncio +import dataclasses +import warnings +from unittest.mock import Mock + +import pytest + +from kopf.reactor.indexing import OperatorIndexers +from kopf.structs.references import Insights, Resource +from kopf.structs.reviews import CreateOptions, Request, RequestKind, RequestPayload, \ + RequestResource, UserInfo, WebhookFn +from kopf.toolkits.webhooks import WebhookServer + + +# TODO: LATER: Fix this issue some day later. +@pytest.fixture() +def no_serverside_resource_warnings(): + """ + Hide an irrelevant ResourceWarning on the server side: + + It happens when a client disconnects from the webhook server, + and the server closes the transport for that client. The garbage + collector calls ``__del__()`` on the SSL proto object, despite + it is not close to the moment. + """ + with warnings.catch_warnings(): + warnings.filterwarnings('ignore', + category=ResourceWarning, + module='asyncio.sslproto', + message='unclosed transport') + yield + + +# TODO: LATER: Fix this issue after aiohttp 4.0.0 is used. +@pytest.fixture() +async def no_clientside_resource_warnings(): + """ + Hide an irrelevant ResourceWarning on the client side. 
+ + https://docs.aiohttp.org/en/stable/client_advanced.html#graceful-shutdown + """ + yield + await asyncio.sleep(0.100) + + +@pytest.fixture() +async def no_sslproto_warnings(no_serverside_resource_warnings, no_clientside_resource_warnings): + pass + + +# cert generation is somewhat slow (~1s) +@pytest.fixture(scope='module') +def certpkey(): + cert, pkey = WebhookServer.build_certificate(['localhost', '127.0.0.1']) + return cert, pkey + + +@pytest.fixture() +def certfile(tmpdir, certpkey): + path = tmpdir.join('cert.pem') + path.write_binary(certpkey[0]) + return str(path) + + +@pytest.fixture() +def pkeyfile(tmpdir, certpkey): + path = tmpdir.join('pkey.pem') + path.write_binary(certpkey[1]) + return str(path) + + +@pytest.fixture() +def adm_request(resource): + return Request( + apiVersion='admission.k8s.io/v1', + kind='AdmissionReview', + request=RequestPayload( + uid='uid1', + kind=RequestKind(group=resource.group, version=resource.version, kind=resource.kind), + resource=RequestResource(group=resource.group, version=resource.version, resource=resource.plural), + requestKind=RequestKind(group=resource.group, version=resource.version, kind=resource.kind), + requestResource=RequestResource(group=resource.group, version=resource.version, resource=resource.plural), + userInfo=UserInfo(username='user1', uid='useruid1', groups=['group1']), + name='name1', + namespace='ns1', + operation='CREATE', + options=CreateOptions(apiVersion='meta.k8s.io/v1', kind='CreateOptions'), + object={'spec': {'field': 'value'}}, + oldObject=None, + dryRun=False, + )) + + +@dataclasses.dataclass(frozen=True) +class Responder: + fn: WebhookFn + fut: asyncio.Future # asyncio.Future[Response] + + +@pytest.fixture() +async def responder() -> Responder: + fut = asyncio.Future() + async def fn(*_, **__): + return await fut + return Responder(fn=fn, fut=fut) + + +@pytest.fixture() +async def insights(settings, resource): + val_resource = Resource('admissionregistration.k8s.io', 'v1', 
'validatingwebhookconfigurations') + mut_resource = Resource('admissionregistration.k8s.io', 'v1', 'mutatingwebhookconfigurations') + insights = Insights() + insights.resources.add(resource) + await insights.backbone.fill(resources=[val_resource, mut_resource]) + insights.ready_resources.set() + return insights + + +@pytest.fixture() +def indices(): + indexers = OperatorIndexers() + return indexers.indices + + +@dataclasses.dataclass(frozen=True, eq=False) +class K8sMocks: + patch_obj: Mock + create_obj: Mock + post_event: Mock + sleep_or_wait: Mock + + +@pytest.fixture(autouse=True) +def k8s_mocked(mocker): + # We mock on the level of our own K8s API wrappers, not the K8s client. + return K8sMocks( + patch_obj=mocker.patch('kopf.clients.patching.patch_obj', return_value={}), + create_obj=mocker.patch('kopf.clients.creating.create_obj', return_value={}), + post_event=mocker.patch('kopf.clients.events.post_event'), + sleep_or_wait=mocker.patch('kopf.structs.primitives.sleep_or_wait', return_value=None), + ) diff --git a/tests/admission/test_admission_manager.py b/tests/admission/test_admission_manager.py new file mode 100644 index 00000000..b4079227 --- /dev/null +++ b/tests/admission/test_admission_manager.py @@ -0,0 +1,226 @@ +import pytest + +import kopf +from kopf.clients.errors import APIConflictError, APIError, APIForbiddenError, APIUnauthorizedError +from kopf.reactor.admission import configuration_manager +from kopf.structs.handlers import WebhookType +from kopf.structs.primitives import Container +from kopf.structs.references import MUTATING_WEBHOOK, VALIDATING_WEBHOOK + + +@pytest.mark.parametrize('reason', set(WebhookType)) +@pytest.mark.parametrize('selector', {VALIDATING_WEBHOOK, MUTATING_WEBHOOK}) +async def test_nothing_happens_if_not_managed( + mocker, settings, registry, insights, selector, reason, k8s_mocked): + + container = Container() + mocker.patch.object(insights.ready_resources, 'wait') # before the general Event.wait! 
+ mocker.patch.object(insights.backbone, 'wait_for') + mocker.patch.object(container, 'as_changed') + mocker.patch('asyncio.Event.wait') + + settings.admission.managed = None + await configuration_manager( + reason=reason, + selector=selector, + registry=registry, + settings=settings, + insights=insights, + container=container, + ) + + assert not insights.ready_resources.wait.called + assert not insights.backbone.wait_for.called + assert not k8s_mocked.create_obj.called + assert not k8s_mocked.patch_obj.called + assert not container.as_changed.called + + +@pytest.mark.parametrize('reason', set(WebhookType)) +@pytest.mark.parametrize('selector', {VALIDATING_WEBHOOK, MUTATING_WEBHOOK}) +async def test_creation_is_attempted( + mocker, settings, registry, insights, selector, resource, reason, k8s_mocked): + + container = Container() + mocker.patch.object(container, 'as_changed', return_value=aiter([])) + + settings.admission.managed = 'xyz' + await configuration_manager( + reason=reason, + selector=selector, + registry=registry, + settings=settings, + insights=insights, + container=container, + ) + + assert k8s_mocked.create_obj.call_count == 1 + assert k8s_mocked.create_obj.call_args_list[0][1]['resource'].group == 'admissionregistration.k8s.io' + assert k8s_mocked.create_obj.call_args_list[0][1]['name'] == 'xyz' + + +@pytest.mark.parametrize('reason', set(WebhookType)) +@pytest.mark.parametrize('selector', {VALIDATING_WEBHOOK, MUTATING_WEBHOOK}) +async def test_creation_ignores_if_exists_already( + mocker, settings, registry, insights, selector, resource, reason, k8s_mocked): + + container = Container() + mocker.patch.object(container, 'as_changed', return_value=aiter([])) + k8s_mocked.create_obj.side_effect = APIConflictError({}, status=409) + + settings.admission.managed = 'xyz' + await configuration_manager( + reason=reason, + selector=selector, + registry=registry, + settings=settings, + insights=insights, + container=container, + ) + + assert 
k8s_mocked.create_obj.call_count == 1 + assert k8s_mocked.create_obj.call_args_list[0][1]['resource'].group == 'admissionregistration.k8s.io' + assert k8s_mocked.create_obj.call_args_list[0][1]['name'] == 'xyz' + + +@pytest.mark.parametrize('error', {APIError, APIForbiddenError, APIUnauthorizedError}) +@pytest.mark.parametrize('reason', set(WebhookType)) +@pytest.mark.parametrize('selector', {VALIDATING_WEBHOOK, MUTATING_WEBHOOK}) +async def test_creation_escalates_on_errors( + mocker, settings, registry, insights, selector, resource, reason, k8s_mocked, error): + + container = Container() + mocker.patch.object(container, 'as_changed', return_value=aiter([])) + k8s_mocked.create_obj.side_effect = error({}, status=400) + + with pytest.raises(error): + settings.admission.managed = 'xyz' + await configuration_manager( + reason=reason, + selector=selector, + registry=registry, + settings=settings, + insights=insights, + container=container, + ) + + assert k8s_mocked.create_obj.call_count == 1 + assert k8s_mocked.create_obj.call_args_list[0][1]['resource'].group == 'admissionregistration.k8s.io' + assert k8s_mocked.create_obj.call_args_list[0][1]['name'] == 'xyz' + + +@pytest.mark.parametrize('reason', set(WebhookType)) +@pytest.mark.parametrize('selector', {VALIDATING_WEBHOOK, MUTATING_WEBHOOK}) +async def test_patching_on_changes( + mocker, settings, registry, insights, selector, resource, reason, k8s_mocked): + + @kopf.on.validate(*resource, registry=registry) + def fn_v(**_): ... + + @kopf.on.mutate(*resource, registry=registry) + def fn_m(**_): ... 
+ + container = Container() + mocker.patch.object(container, 'as_changed', return_value=aiter([ + {'url': 'https://hostname1/'}, + {'url': 'https://hostname2/'}, + ])) + + settings.admission.managed = 'xyz' + await configuration_manager( + reason=reason, + selector=selector, + registry=registry, + settings=settings, + insights=insights, + container=container, + ) + + assert k8s_mocked.patch_obj.call_count == 3 + assert k8s_mocked.patch_obj.call_args_list[0][1]['resource'].group == 'admissionregistration.k8s.io' + assert k8s_mocked.patch_obj.call_args_list[0][1]['name'] == 'xyz' + assert k8s_mocked.patch_obj.call_args_list[1][1]['resource'].group == 'admissionregistration.k8s.io' + assert k8s_mocked.patch_obj.call_args_list[1][1]['name'] == 'xyz' + assert k8s_mocked.patch_obj.call_args_list[2][1]['resource'].group == 'admissionregistration.k8s.io' + assert k8s_mocked.patch_obj.call_args_list[2][1]['name'] == 'xyz' + + patch = k8s_mocked.patch_obj.call_args_list[0][1]['patch'] + assert patch['webhooks'] + assert patch['webhooks'][0]['clientConfig']['url'].startswith('https://hostname1/') + assert patch['webhooks'][0]['rules'] + assert patch['webhooks'][0]['rules'][0]['resources'] == ['kopfexamples'] + + patch = k8s_mocked.patch_obj.call_args_list[1][1]['patch'] + assert patch['webhooks'] + assert patch['webhooks'][0]['clientConfig']['url'].startswith('https://hostname2/') + assert patch['webhooks'][0]['rules'] + assert patch['webhooks'][0]['rules'][0]['resources'] == ['kopfexamples'] + + +@pytest.mark.parametrize('reason', set(WebhookType)) +@pytest.mark.parametrize('selector', {VALIDATING_WEBHOOK, MUTATING_WEBHOOK}) +async def test_patching_purges_non_permanent_webhooks( + mocker, settings, registry, insights, selector, resource, reason, k8s_mocked): + + @kopf.on.validate(*resource, registry=registry, persistent=False) + def fn_v(**_): ... + + @kopf.on.mutate(*resource, registry=registry, persistent=False) + def fn_m(**_): ... 
+ + container = Container() + mocker.patch.object(container, 'as_changed', return_value=aiter([ + {'url': 'https://hostname/'}, + ])) + + settings.admission.managed = 'xyz' + await configuration_manager( + reason=reason, + selector=selector, + registry=registry, + settings=settings, + insights=insights, + container=container, + ) + + assert k8s_mocked.patch_obj.call_count == 2 + patch = k8s_mocked.patch_obj.call_args_list[-1][1]['patch'] + assert not patch['webhooks'] + + +@pytest.mark.parametrize('reason', set(WebhookType)) +@pytest.mark.parametrize('selector', {VALIDATING_WEBHOOK, MUTATING_WEBHOOK}) +async def test_patching_leaves_permanent_webhooks( + mocker, settings, registry, insights, selector, resource, reason, k8s_mocked): + + @kopf.on.validate(*resource, registry=registry, persistent=True) + def fn_v(**_): ... + + @kopf.on.mutate(*resource, registry=registry, persistent=True) + def fn_m(**_): ... + + container = Container() + mocker.patch.object(container, 'as_changed', return_value=aiter([ + {'url': 'https://hostname/'}, + ])) + + settings.admission.managed = 'xyz' + await configuration_manager( + reason=reason, + selector=selector, + registry=registry, + settings=settings, + insights=insights, + container=container, + ) + + assert k8s_mocked.patch_obj.call_count == 2 + patch = k8s_mocked.patch_obj.call_args_list[-1][1]['patch'] + assert patch['webhooks'][0]['clientConfig']['url'].startswith('https://hostname/') + assert patch['webhooks'][0]['rules'] + assert patch['webhooks'][0]['rules'][0]['resources'] == ['kopfexamples'] + + +async def aiter(src): + for item in src: + yield item diff --git a/tests/admission/test_admission_server.py b/tests/admission/test_admission_server.py new file mode 100644 index 00000000..3e81a200 --- /dev/null +++ b/tests/admission/test_admission_server.py @@ -0,0 +1,51 @@ +import pytest + +import kopf +from kopf.reactor.admission import admission_webhook_server +from kopf.structs.primitives import Container + + +async def 
webhookfn(*_, **__): + pass + + +async def test_requires_webserver_if_webhooks_are_defined( + settings, registry, insights, resource, k8s_mocked): + + @kopf.on.validate(*resource, registry=registry) + def fn_v(**_): ... + + @kopf.on.mutate(*resource, registry=registry) + def fn_m(**_): ... + + container = Container() + with pytest.raises(Exception) as err: + settings.admission.server = None + await admission_webhook_server( + registry=registry, + settings=settings, + insights=insights, + container=container, + webhookfn=webhookfn, + ) + + assert "Admission handlers exist, but no admission server/tunnel" in str(err.value) + + +async def test_configures_client_configs( + settings, registry, insights, resource, k8s_mocked): + + async def server(_): + yield {'url': 'https://hostname/'} + + container = Container() + settings.admission.server = server + await admission_webhook_server( + registry=registry, + settings=settings, + insights=insights, + container=container, + webhookfn=webhookfn, + ) + + assert container.get_nowait() == {'url': 'https://hostname/'} diff --git a/tests/admission/test_certificates.py b/tests/admission/test_certificates.py new file mode 100644 index 00000000..12978bee --- /dev/null +++ b/tests/admission/test_certificates.py @@ -0,0 +1,43 @@ +import certvalidator +import pytest + +from kopf.toolkits.webhooks import WebhookServer + + +def test_missing_oscrypto(no_oscrypto): + with pytest.raises(ImportError) as err: + WebhookServer.build_certificate(['...']) + assert "pip install certbuilder" in str(err.value) + + +def test_missing_certbuilder(no_certbuilder): + with pytest.raises(ImportError) as err: + WebhookServer.build_certificate(['...']) + assert "pip install certbuilder" in str(err.value) + + +def test_certificate_generation(): + names = ['hostname1', 'hostname2', '001.002.003.004', '0:0:0:0:0:0:0:1'] + cert, pkey = WebhookServer.build_certificate(names) + context = certvalidator.ValidationContext(extra_trust_roots=[cert]) + validator = 
certvalidator.CertificateValidator(cert, validation_context=context) + path = validator.validate_tls('hostname1') + assert len(path) == 1 # self-signed + assert path.first.ca + assert path.first.self_issued + assert set(path.first.valid_domains) == {'hostname1', 'hostname2', '1.2.3.4', '::1'} + assert set(path.first.valid_ips) == {'1.2.3.4', '::1'} + + +@pytest.mark.parametrize('hostnames, common_name', [ + (['hostname1', 'hostname2'], 'hostname1'), + (['hostname2', 'hostname1'], 'hostname2'), + (['1.2.3.4', 'hostname1'], 'hostname1'), + (['1.2.3.4', '2.3.4.5'], '1.2.3.4'), +]) +def test_common_name_selection(hostnames, common_name): + cert, pkey = WebhookServer.build_certificate(hostnames) + context = certvalidator.ValidationContext(extra_trust_roots=[cert]) + validator = certvalidator.CertificateValidator(cert, validation_context=context) + path = validator.validate_tls(common_name) + assert path.first.subject.native['common_name'] == common_name diff --git a/tests/admission/test_jsonpatch.py b/tests/admission/test_jsonpatch.py new file mode 100644 index 00000000..0b0d4f2b --- /dev/null +++ b/tests/admission/test_jsonpatch.py @@ -0,0 +1,37 @@ +from kopf.structs.patches import Patch + + +def test_addition_of_the_key(): + patch = Patch() + patch['xyz'] = 123 + jsonpatch = patch.as_json_patch() + assert jsonpatch == [ + {'op': 'replace', 'path': '/xyz', 'value': 123}, + ] + + +def test_removal_of_the_key(): + patch = Patch() + patch['xyz'] = None + jsonpatch = patch.as_json_patch() + assert jsonpatch == [ + {'op': 'remove', 'path': '/xyz'}, + ] + + +def test_addition_of_the_subkey(): + patch = Patch() + patch['xyz'] = {'abc': 123} + jsonpatch = patch.as_json_patch() + assert jsonpatch == [ + {'op': 'replace', 'path': '/xyz/abc', 'value': 123}, + ] + + +def test_removal_of_the_subkey(): + patch = Patch() + patch['xyz'] = {'abc': None} + jsonpatch = patch.as_json_patch() + assert jsonpatch == [ + {'op': 'remove', 'path': '/xyz/abc'}, + ] diff --git 
a/tests/admission/test_managed_webhooks.py b/tests/admission/test_managed_webhooks.py new file mode 100644 index 00000000..0ea0cea8 --- /dev/null +++ b/tests/admission/test_managed_webhooks.py @@ -0,0 +1,235 @@ +import pytest + +import kopf +from kopf.reactor.admission import build_webhooks +from kopf.structs.references import Resource + + +@pytest.fixture() +def handlers(resource, registry): + + @kopf.on.validate(*resource, registry=registry) + def validate_fn(**_): + pass + + @kopf.on.mutate(*resource, registry=registry) + def mutate_fn(**_): + pass + + return registry._resource_webhooks.get_all_handlers() + + +@pytest.mark.parametrize('id, field, exp_name', [ + ('id', None, 'id.sfx'), + ('id.', None, 'id..sfx'), + ('id-', None, 'id-.sfx'), + ('id_', None, 'id-.sfx'), + ('id!', None, 'id21.sfx'), + ('id%', None, 'id25.sfx'), + ('id/sub', None, 'id.sub.sfx'), + ('id', 'fld1.fld2', 'id.fld1.fld2.sfx'), +]) +@pytest.mark.parametrize('decorator', [kopf.on.validate, kopf.on.mutate]) +def test_name_is_normalised(registry, resource, decorator, id, field, exp_name): + + @decorator(*resource, id=id, field=field, registry=registry) + def fn(**_): + pass + + webhooks = build_webhooks( + registry._resource_webhooks.get_all_handlers(), + resources=[resource], + name_suffix='sfx', + client_config={}) + + assert len(webhooks) == 1 + assert webhooks[0]['name'] == exp_name + + +@pytest.mark.parametrize('id, field, exp_url', [ + ('id', None, 'https://hostname/p1/p2/id'), + ('id.', None, 'https://hostname/p1/p2/id.'), + ('id-', None, 'https://hostname/p1/p2/id-'), + ('id_', None, 'https://hostname/p1/p2/id_'), + ('id!', None, 'https://hostname/p1/p2/id%21'), + ('id%', None, 'https://hostname/p1/p2/id%25'), + ('id/sub', None, 'https://hostname/p1/p2/id/sub'), + ('id', 'fld1.fld2', 'https://hostname/p1/p2/id/fld1.fld2'), +]) +@pytest.mark.parametrize('decorator', [kopf.on.validate, kopf.on.mutate]) +def test_url_is_suffixed(registry, resource, decorator, id, field, exp_url): + + 
@decorator(*resource, id=id, field=field, registry=registry) + def fn(**_): + pass + + webhooks = build_webhooks( + registry._resource_webhooks.get_all_handlers(), + resources=[resource], + name_suffix='sfx', + client_config={'url': 'https://hostname/p1/p2'}) + + assert len(webhooks) == 1 + assert webhooks[0]['clientConfig']['url'] == exp_url + + +@pytest.mark.parametrize('id, field, exp_path', [ + ('id', None, 'p1/p2/id'), + ('id.', None, 'p1/p2/id.'), + ('id-', None, 'p1/p2/id-'), + ('id_', None, 'p1/p2/id_'), + ('id!', None, 'p1/p2/id%21'), + ('id%', None, 'p1/p2/id%25'), + ('id/sub', None, 'p1/p2/id/sub'), + ('id', 'fld1.fld2', 'p1/p2/id/fld1.fld2'), +]) +@pytest.mark.parametrize('decorator', [kopf.on.validate, kopf.on.mutate]) +def test_path_is_suffixed(registry, resource, decorator, id, field, exp_path): + + @decorator(*resource, id=id, field=field, registry=registry) + def fn(**_): + pass + + webhooks = build_webhooks( + registry._resource_webhooks.get_all_handlers(), + resources=[resource], + name_suffix='sfx', + client_config={'service': {'path': 'p1/p2'}}) + + assert len(webhooks) == 1 + assert webhooks[0]['clientConfig']['service']['path'] == exp_path + + +@pytest.mark.parametrize('opts, key, val', [ + (dict(side_effects=False), 'sideEffects', 'None'), + (dict(side_effects=True), 'sideEffects', 'NoneOnDryRun'), + (dict(ignore_failures=False), 'failurePolicy', 'Fail'), + (dict(ignore_failures=True), 'failurePolicy', 'Ignore'), +]) +@pytest.mark.parametrize('decorator', [kopf.on.validate, kopf.on.mutate]) +def test_flat_options_are_mapped(registry, resource, decorator, opts, key, val): + + @decorator(*resource, registry=registry, **opts) + def fn(**_): + pass + + webhooks = build_webhooks( + registry._resource_webhooks.get_all_handlers(), + resources=[resource], + name_suffix='sfx', + client_config={}) + + assert len(webhooks) == 1 + assert webhooks[0][key] == val + assert webhooks[0]['matchPolicy'] == 'Equivalent' + assert webhooks[0]['timeoutSeconds'] == 
30 + assert webhooks[0]['admissionReviewVersions'] == ['v1', 'v1beta1'] + + +@pytest.mark.parametrize('opts, key, val', [ + (dict(), 'operations', ['*']), + (dict(operation='CREATE'), 'operations', ['CREATE']), + (dict(operation='UPDATE'), 'operations', ['UPDATE']), + (dict(operation='DELETE'), 'operations', ['DELETE']), +]) +@pytest.mark.parametrize('decorator', [kopf.on.validate, kopf.on.mutate]) +def test_rule_options_are_mapped(registry, resource, decorator, opts, key, val): + + @decorator(*resource, registry=registry, **opts) + def fn(**_): + pass + + webhooks = build_webhooks( + registry._resource_webhooks.get_all_handlers(), + resources=[resource], + name_suffix='sfx', + client_config={}) + + assert len(webhooks) == 1 + assert len(webhooks[0]['rules']) == 1 + assert webhooks[0]['rules'][0][key] == val + assert webhooks[0]['rules'][0]['scope'] == '*' + assert webhooks[0]['rules'][0]['apiGroups'] == [resource.group] + assert webhooks[0]['rules'][0]['apiVersions'] == [resource.version] + assert webhooks[0]['rules'][0]['resources'] == [resource.plural] + + +@pytest.mark.parametrize('decorator', [kopf.on.validate, kopf.on.mutate]) +def test_multiple_handlers(registry, resource, decorator): + + @decorator(*resource, registry=registry) + def fn1(**_): + pass + + @decorator(*resource, registry=registry) + def fn2(**_): + pass + + webhooks = build_webhooks( + registry._resource_webhooks.get_all_handlers(), + resources=[resource], + name_suffix='sfx', + client_config={}) + + assert len(webhooks) == 2 + assert len(webhooks[0]['rules']) == 1 + assert len(webhooks[1]['rules']) == 1 + + +@pytest.mark.parametrize('decorator', [kopf.on.validate, kopf.on.mutate]) +def test_irrelevant_resources_are_ignored(registry, resource, decorator): + + @decorator(*resource, registry=registry) + def fn(**_): + pass + + irrelevant_resource = Resource('grp', 'vers', 'plural') + webhooks = build_webhooks( + registry._resource_webhooks.get_all_handlers(), + resources=[irrelevant_resource], + 
name_suffix='sfx', + client_config={}) + + assert len(webhooks) == 1 + assert len(webhooks[0]['rules']) == 0 + + +@pytest.mark.parametrize('label_value, exp_expr', [ + (kopf.PRESENT, {'key': 'lbl', 'operator': 'Exists'}), + (kopf.ABSENT, {'key': 'lbl', 'operator': 'DoesNotExist'}), + ('val', {'key': 'lbl', 'operator': 'In', 'values': ['val']}), +]) +@pytest.mark.parametrize('decorator', [kopf.on.validate, kopf.on.mutate]) +def test_labels_specific_filter(registry, resource, decorator, label_value, exp_expr): + + @decorator(*resource, registry=registry, labels={'lbl': label_value}) + def fn(**_): + pass + + irrelevant_resource = Resource('grp', 'vers', 'plural') + webhooks = build_webhooks( + registry._resource_webhooks.get_all_handlers(), + resources=[irrelevant_resource], + name_suffix='sfx', + client_config={}) + + assert len(webhooks) == 1 + assert webhooks[0]['objectSelector'] == {'matchExpressions': [exp_expr]} + + +@pytest.mark.parametrize('decorator', [kopf.on.validate, kopf.on.mutate]) +def test_labels_callable_filter(registry, resource, decorator): + + @decorator(*resource, registry=registry, labels={'lbl': lambda *_, **__: None}) + def fn(**_): + pass + + irrelevant_resource = Resource('grp', 'vers', 'plural') + webhooks = build_webhooks( + registry._resource_webhooks.get_all_handlers(), + resources=[irrelevant_resource], + name_suffix='sfx', + client_config={}) + + assert len(webhooks) == 1 + assert webhooks[0]['objectSelector'] is None diff --git a/tests/admission/test_serving_ephemeral_memos.py b/tests/admission/test_serving_ephemeral_memos.py new file mode 100644 index 00000000..d1a6faa6 --- /dev/null +++ b/tests/admission/test_serving_ephemeral_memos.py @@ -0,0 +1,31 @@ +import pytest + +from kopf.reactor.admission import serve_admission_request + + +@pytest.mark.parametrize('operation', ['CREATE']) +async def test_memo_is_not_remembered_if_admission_is_for_creation( + settings, registry, resource, memories, insights, indices, adm_request, 
operation): + + adm_request['request']['operation'] = operation + await serve_admission_request( + adm_request, + settings=settings, registry=registry, insights=insights, + memories=memories, memobase=object(), indices=indices, + ) + known_memories = list(memories.iter_all_memories()) + assert not known_memories + + +@pytest.mark.parametrize('operation', ['UPDATE', 'DELETE', 'CONNECT', '*WHATEVER*']) +async def test_memo_is_remembered_if_admission_for_other_operations( + settings, registry, resource, memories, insights, indices, adm_request, operation): + + adm_request['request']['operation'] = operation + await serve_admission_request( + adm_request, + settings=settings, registry=registry, insights=insights, + memories=memories, memobase=object(), indices=indices, + ) + known_memories = list(memories.iter_all_memories()) + assert len(known_memories) == 1 diff --git a/tests/admission/test_serving_handler_selection.py b/tests/admission/test_serving_handler_selection.py new file mode 100644 index 00000000..073ea669 --- /dev/null +++ b/tests/admission/test_serving_handler_selection.py @@ -0,0 +1,229 @@ +from unittest.mock import Mock + +import pytest + +import kopf +from kopf.reactor.admission import serve_admission_request +from kopf.structs.handlers import HandlerId, WebhookType + + +async def test_all_handlers_with_no_id_or_reason_requested( + settings, registry, resource, memories, insights, indices, adm_request): + + mock1 = Mock() + mock2 = Mock() + mock3 = Mock() + mock4 = Mock() + + @kopf.on.validate(*resource) + def fn1(**kwargs): + mock1(**kwargs) + + @kopf.on.validate(*resource) + def fn2(**kwargs): + mock2(**kwargs) + + @kopf.on.mutate(*resource) + def fn3(**kwargs): + mock3(**kwargs) + + @kopf.on.mutate(*resource) + def fn4(**kwargs): + mock4(**kwargs) + + response = await serve_admission_request( + adm_request, + settings=settings, registry=registry, insights=insights, + memories=memories, memobase=object(), indices=indices, + ) + assert 
response['response']['allowed'] is True + assert mock1.call_count == 1 + assert mock2.call_count == 1 + assert mock3.call_count == 1 + assert mock4.call_count == 1 + + +@pytest.mark.parametrize('reason', set(WebhookType)) +async def test_handlers_with_reason_requested( + settings, registry, resource, memories, insights, indices, adm_request, reason): + + mock1 = Mock() + mock2 = Mock() + mock3 = Mock() + mock4 = Mock() + + @kopf.on.validate(*resource) + def fn1(**kwargs): + mock1(**kwargs) + + @kopf.on.validate(*resource) + def fn2(**kwargs): + mock2(**kwargs) + + @kopf.on.mutate(*resource) + def fn3(**kwargs): + mock3(**kwargs) + + @kopf.on.mutate(*resource) + def fn4(**kwargs): + mock4(**kwargs) + + response = await serve_admission_request( + adm_request, reason=reason, + settings=settings, registry=registry, insights=insights, + memories=memories, memobase=object(), indices=indices, + ) + assert response['response']['allowed'] is True + assert mock1.call_count == (1 if reason == WebhookType.VALIDATING else 0) + assert mock2.call_count == (1 if reason == WebhookType.VALIDATING else 0) + assert mock3.call_count == (1 if reason == WebhookType.MUTATING else 0) + assert mock4.call_count == (1 if reason == WebhookType.MUTATING else 0) + + +async def test_handlers_with_webhook_requested( + settings, registry, resource, memories, insights, indices, adm_request): + + mock1 = Mock() + mock2 = Mock() + mock3 = Mock() + mock4 = Mock() + + @kopf.on.validate(*resource, id='fnX') + def fn1(**kwargs): + mock1(**kwargs) + + @kopf.on.validate(*resource) + def fn2(**kwargs): + mock2(**kwargs) + + @kopf.on.mutate(*resource) + def fn3(**kwargs): + mock3(**kwargs) + + @kopf.on.mutate(*resource, id='fnX') + def fn4(**kwargs): + mock4(**kwargs) + + response = await serve_admission_request( + adm_request, webhook=HandlerId('fnX'), + settings=settings, registry=registry, insights=insights, + memories=memories, memobase=object(), indices=indices, + ) + assert 
response['response']['allowed'] is True + assert mock1.call_count == 1 + assert mock2.call_count == 0 + assert mock3.call_count == 0 + assert mock4.call_count == 1 + + +@pytest.mark.parametrize('reason', set(WebhookType)) +async def test_handlers_with_reason_and_webhook_requested( + settings, registry, resource, memories, insights, indices, adm_request, reason): + + mock1 = Mock() + mock2 = Mock() + mock3 = Mock() + mock4 = Mock() + + @kopf.on.validate(*resource, id='fnX') + def fn1(**kwargs): + mock1(**kwargs) + + @kopf.on.validate(*resource) + def fn2(**kwargs): + mock2(**kwargs) + + @kopf.on.mutate(*resource) + def fn3(**kwargs): + mock3(**kwargs) + + @kopf.on.mutate(*resource, id='fnX') + def fn4(**kwargs): + mock4(**kwargs) + + response = await serve_admission_request( + adm_request, webhook=HandlerId('fnX'), reason=reason, + settings=settings, registry=registry, insights=insights, + memories=memories, memobase=object(), indices=indices, + ) + assert response['response']['allowed'] is True + assert mock1.call_count == (1 if reason == WebhookType.VALIDATING else 0) + assert mock2.call_count == 0 + assert mock3.call_count == 0 + assert mock4.call_count == (1 if reason == WebhookType.MUTATING else 0) + + +@pytest.mark.parametrize('operation', ['CREATE', 'UPDATE', 'CONNECT', '*WHATEVER*']) +async def test_mutating_handlers_are_selected_for_nondeletion( + settings, registry, resource, memories, insights, indices, adm_request, operation): + + v_mock = Mock() + m_mock = Mock() + + @kopf.on.validate(*resource) + def v_fn(**kwargs): + v_mock(**kwargs) + + @kopf.on.mutate(*resource) + def m_fn(**kwargs): + m_mock(**kwargs) + + adm_request['request']['operation'] = operation + response = await serve_admission_request( + adm_request, + settings=settings, registry=registry, insights=insights, + memories=memories, memobase=object(), indices=indices, + ) + assert response['response']['allowed'] is True + assert v_mock.call_count == 1 + assert m_mock.call_count == 1 + + 
+async def test_mutating_handlers_are_not_selected_for_deletion_by_default( + settings, registry, resource, memories, insights, indices, adm_request): + + v_mock = Mock() + m_mock = Mock() + + @kopf.on.validate(*resource) + def v_fn(**kwargs): + v_mock(**kwargs) + + @kopf.on.mutate(*resource) + def m_fn(**kwargs): + m_mock(**kwargs) + + adm_request['request']['operation'] = 'DELETE' + response = await serve_admission_request( + adm_request, + settings=settings, registry=registry, insights=insights, + memories=memories, memobase=object(), indices=indices, + ) + assert response['response']['allowed'] is True + assert v_mock.call_count == 1 + assert m_mock.call_count == 0 + + +async def test_mutating_handlers_are_selected_for_deletion_if_explicitly_marked( + settings, registry, resource, memories, insights, indices, adm_request): + + v_mock = Mock() + m_mock = Mock() + + @kopf.on.validate(*resource) + def v_fn(**kwargs): + v_mock(**kwargs) + + @kopf.on.mutate(*resource, operation='DELETE') + def m_fn(**kwargs): + m_mock(**kwargs) + + adm_request['request']['operation'] = 'DELETE' + response = await serve_admission_request( + adm_request, + settings=settings, registry=registry, insights=insights, + memories=memories, memobase=object(), indices=indices, + ) + assert response['response']['allowed'] is True + assert v_mock.call_count == 1 + assert m_mock.call_count == 1 diff --git a/tests/admission/test_serving_kwargs_passthrough.py b/tests/admission/test_serving_kwargs_passthrough.py new file mode 100644 index 00000000..939f80fe --- /dev/null +++ b/tests/admission/test_serving_kwargs_passthrough.py @@ -0,0 +1,114 @@ +from unittest.mock import Mock + +import pytest + +import kopf +from kopf.reactor.admission import serve_admission_request + + +@pytest.mark.parametrize('dryrun', [True, False]) +async def test_dryrun_passed( + settings, registry, resource, memories, insights, indices, adm_request, dryrun): + mock = Mock() + + @kopf.on.validate(*resource) + def fn(**kwargs): 
+ mock(**kwargs) + + adm_request['request']['dryRun'] = dryrun + await serve_admission_request( + adm_request, + settings=settings, registry=registry, insights=insights, + memories=memories, memobase=object(), indices=indices, + ) + assert mock.call_count == 1 + assert mock.call_args[1]['dryrun'] == dryrun + + +async def test_headers_passed( + settings, registry, resource, memories, insights, indices, adm_request): + mock = Mock() + + @kopf.on.validate(*resource) + def fn(**kwargs): + mock(**kwargs) + + headers = {'X': '123', 'Y': '456'} + await serve_admission_request( + adm_request, headers=headers, + settings=settings, registry=registry, insights=insights, + memories=memories, memobase=object(), indices=indices, + ) + assert mock.call_count == 1 + assert mock.call_args[1]['headers'] == headers + + +async def test_headers_not_passed_but_injected( + settings, registry, resource, memories, insights, indices, adm_request): + mock = Mock() + + @kopf.on.validate(*resource) + def fn(**kwargs): + mock(**kwargs) + + await serve_admission_request( + adm_request, + settings=settings, registry=registry, insights=insights, + memories=memories, memobase=object(), indices=indices, + ) + assert mock.call_count == 1 + assert mock.call_args[1]['headers'] == {} + + +async def test_sslpeer_passed( + settings, registry, resource, memories, insights, indices, adm_request): + mock = Mock() + + @kopf.on.validate(*resource) + def fn(**kwargs): + mock(**kwargs) + + sslpeer = {'X': '123', 'Y': '456'} + await serve_admission_request( + adm_request, sslpeer=sslpeer, + settings=settings, registry=registry, insights=insights, + memories=memories, memobase=object(), indices=indices, + ) + assert mock.call_count == 1 + assert mock.call_args[1]['sslpeer'] == sslpeer + + +async def test_sslpeer_not_passed_but_injected( + settings, registry, resource, memories, insights, indices, adm_request): + mock = Mock() + + @kopf.on.validate(*resource) + def fn(**kwargs): + mock(**kwargs) + + await 
serve_admission_request( + adm_request, + settings=settings, registry=registry, insights=insights, + memories=memories, memobase=object(), indices=indices, + ) + assert mock.call_count == 1 + assert mock.call_args[1]['sslpeer'] == {} + + +async def test_userinfo_passed( + settings, registry, resource, memories, insights, indices, adm_request): + mock = Mock() + + @kopf.on.validate(*resource) + def fn(**kwargs): + mock(**kwargs) + + userinfo = {'X': '123', 'Y': '456'} + adm_request['request']['userInfo'] = userinfo + await serve_admission_request( + adm_request, + settings=settings, registry=registry, insights=insights, + memories=memories, memobase=object(), indices=indices, + ) + assert mock.call_count == 1 + assert mock.call_args[1]['userinfo'] == userinfo diff --git a/tests/admission/test_serving_responses.py b/tests/admission/test_serving_responses.py new file mode 100644 index 00000000..e7ec13a1 --- /dev/null +++ b/tests/admission/test_serving_responses.py @@ -0,0 +1,169 @@ +import base64 +import json + +import pytest + +import kopf +from kopf.reactor.admission import AdmissionError, serve_admission_request +from kopf.reactor.handling import PermanentError, TemporaryError + + +async def test_metadata_reflects_the_request( + settings, registry, memories, insights, indices, adm_request): + + adm_request['apiVersion'] = 'any.group/any.version' + adm_request['kind'] = 'AnyKindOfAdmissionReview' + adm_request['request']['uid'] = 'anyuid' + response = await serve_admission_request( + adm_request, + settings=settings, registry=registry, insights=insights, + memories=memories, memobase=object(), indices=indices, + ) + assert response['apiVersion'] == 'any.group/any.version' + assert response['kind'] == 'AnyKindOfAdmissionReview' + assert response['response']['uid'] == 'anyuid' + + +async def test_simple_response_with_no_handlers_allows_admission( + settings, registry, memories, insights, indices, adm_request): + + response = await serve_admission_request( + 
adm_request, + settings=settings, registry=registry, insights=insights, + memories=memories, memobase=object(), indices=indices, + ) + assert 'warnings' not in response['response'] + assert 'patchType' not in response['response'] + assert 'patch' not in response['response'] + assert 'status' not in response['response'] + assert response['response']['allowed'] is True + + +@pytest.mark.parametrize('decorator', [kopf.on.validate, kopf.on.mutate]) +async def test_simple_handler_allows_admission( + settings, registry, resource, memories, insights, indices, adm_request, + decorator): + + @decorator(*resource) + def fn(**_): + pass + + response = await serve_admission_request( + adm_request, + settings=settings, registry=registry, insights=insights, + memories=memories, memobase=object(), indices=indices, + ) + assert 'warnings' not in response['response'] + assert 'patchType' not in response['response'] + assert 'patch' not in response['response'] + assert 'status' not in response['response'] + assert response['response']['allowed'] is True + + +@pytest.mark.parametrize('error, exp_msg, exp_code', [ + (Exception("No!"), "No!", 500), + (kopf.PermanentError("No!"), "No!", 500), + (kopf.TemporaryError("No!"), "No!", 500), + (kopf.AdmissionError("No!"), "No!", 500), + (kopf.AdmissionError("No!", code=123), "No!", 123), +]) +@pytest.mark.parametrize('decorator', [kopf.on.validate, kopf.on.mutate]) +async def test_errors_deny_admission( + settings, registry, resource, memories, insights, indices, adm_request, + decorator, error, exp_msg, exp_code): + + @decorator(*resource) + def fn(**_): + raise error + + response = await serve_admission_request( + adm_request, + settings=settings, registry=registry, insights=insights, + memories=memories, memobase=object(), indices=indices, + ) + assert 'warnings' not in response['response'] + assert 'patchType' not in response['response'] + assert 'patch' not in response['response'] + assert response['response']['allowed'] is False + 
assert response['response']['status'] == {'message': exp_msg, 'code': exp_code} + + +@pytest.mark.parametrize('error1, error2, exp_msg', [ + pytest.param(Exception("err1"), Exception("err2"), "err1", id='builtin-first-samerank'), + pytest.param(TemporaryError("err1"), TemporaryError("err2"), "err1", id='temp-first-samerank'), + pytest.param(PermanentError("err1"), PermanentError("err2"), "err1", id='perm-first-samerank'), + pytest.param(AdmissionError("err1"), AdmissionError("err2"), "err1", id='adms-first-samerank'), + pytest.param(Exception("err1"), TemporaryError("err2"), "err2", id='temp-over-builtin'), + pytest.param(Exception("err1"), AdmissionError("err2"), "err2", id='adms-over-builtin'), + pytest.param(Exception("err1"), PermanentError("err2"), "err2", id='perm-over-builtin'), + pytest.param(TemporaryError("err1"), PermanentError("err2"), "err2", id='perm-over-temp'), + pytest.param(TemporaryError("err1"), AdmissionError("err2"), "err2", id='adms-over-temp'), + pytest.param(PermanentError("err1"), AdmissionError("err2"), "err2", id='adms-over-perm'), +]) +@pytest.mark.parametrize('decorator', [kopf.on.validate, kopf.on.mutate]) +async def test_errors_priorities( + settings, registry, resource, memories, insights, indices, adm_request, + decorator, error1, error2, exp_msg): + + @decorator(*resource) + def fn1(**_): + raise error1 + + @decorator(*resource) + def fn2(**_): + raise error2 + + response = await serve_admission_request( + adm_request, + settings=settings, registry=registry, insights=insights, + memories=memories, memobase=object(), indices=indices, + ) + assert 'warnings' not in response['response'] + assert 'patchType' not in response['response'] + assert 'patch' not in response['response'] + assert response['response']['allowed'] is False + assert response['response']['status'] == {'message': exp_msg, 'code': 500} + + +@pytest.mark.parametrize('decorator', [kopf.on.validate, kopf.on.mutate]) +async def test_warnings_are_returned_to_kubernetes( 
+ settings, registry, resource, memories, insights, indices, adm_request, + decorator): + + @decorator(*resource) + def fn(warnings, **_): + warnings.append("oops!") + + response = await serve_admission_request( + adm_request, + settings=settings, registry=registry, insights=insights, + memories=memories, memobase=object(), indices=indices, + ) + assert 'patchType' not in response['response'] + assert 'patch' not in response['response'] + assert 'status' not in response['response'] + assert response['response']['warnings'] == ['oops!'] + assert response['response']['allowed'] is True + + +@pytest.mark.parametrize('decorator', [kopf.on.validate, kopf.on.mutate]) +async def test_patch_is_returned_to_kubernetes( + settings, registry, resource, memories, insights, indices, adm_request, + decorator): + + @decorator(*resource) + def fn(patch, **_): + patch['xyz'] = 123 + + response = await serve_admission_request( + adm_request, + settings=settings, registry=registry, insights=insights, + memories=memories, memobase=object(), indices=indices, + ) + assert 'warnings' not in response['response'] + assert 'status' not in response['response'] + assert response['response']['allowed'] is True + assert response['response']['patchType'] == 'JSONPatch' + assert json.loads(base64.b64decode(response['response']['patch'])) == [ + {'op': 'replace', 'path': '/xyz', 'value': 123}, + ] diff --git a/tests/admission/test_webhook_ngrok.py b/tests/admission/test_webhook_ngrok.py new file mode 100644 index 00000000..20909cfb --- /dev/null +++ b/tests/admission/test_webhook_ngrok.py @@ -0,0 +1,49 @@ +import asyncio + +import pyngrok.conf +import pyngrok.ngrok +import pytest + +from kopf.toolkits.webhooks import WebhookNgrokTunnel + + +@pytest.fixture(autouse=True) +def pyngrok_mock(mocker): + mocker.patch.object(pyngrok.conf, 'get_default') + mocker.patch.object(pyngrok.ngrok, 'set_auth_token') + mocker.patch.object(pyngrok.ngrok, 'connect') + mocker.patch.object(pyngrok.ngrok, 'disconnect') 
+ pyngrok.ngrok.connect.return_value.public_url = 'https://nowhere' + return pyngrok + + +async def test_missing_pyngrok(no_pyngrok, responder): + with pytest.raises(ImportError) as err: + server = WebhookNgrokTunnel() + async for _ in server(responder.fn): + break # do not sleep + assert "pip install pyngrok" in str(err.value) + + +async def test_ngrok_tunnel( + certfile, pkeyfile, responder, pyngrok_mock): + + responder.fut.set_result({'hello': 'world'}) + server = WebhookNgrokTunnel(port=54321, path='/p1/p2', + region='xx', token='xyz', binary='/bin/ngrok') + async for client_config in server(responder.fn): + assert 'caBundle' not in client_config # trust the default CA + assert client_config['url'] == 'https://nowhere/p1/p2' + break # do not sleep + + assert pyngrok_mock.conf.get_default.called + assert pyngrok_mock.conf.get_default.return_value.ngrok_path == '/bin/ngrok' + assert pyngrok_mock.conf.get_default.return_value.region == 'xx' + assert pyngrok_mock.ngrok.set_auth_token.called + assert pyngrok_mock.ngrok.set_auth_token.call_args_list[0][0][0] == 'xyz' + assert pyngrok_mock.ngrok.connect.called + assert pyngrok_mock.ngrok.connect.call_args_list[0][0][0] == '54321' + assert pyngrok_mock.ngrok.connect.call_args_list[0][1]['bind_tls'] == True + assert not pyngrok_mock.ngrok.disconnect.called + + await asyncio.get_running_loop().shutdown_asyncgens() diff --git a/tests/admission/test_webhook_server.py b/tests/admission/test_webhook_server.py new file mode 100644 index 00000000..0d8a772e --- /dev/null +++ b/tests/admission/test_webhook_server.py @@ -0,0 +1,114 @@ +import base64 +import json +import ssl + +import aiohttp +import pytest + +from kopf.reactor.admission import AmbiguousResourceError, MissingDataError, \ + UnknownResourceError, WebhookError +from kopf.toolkits.webhooks import WebhookK3dServer, WebhookMinikubeServer, WebhookServer + + +async def test_starts_as_http_ipv4(responder): + server = WebhookServer(addr='127.0.0.1', port=22533, 
path='/p1/p2', insecure=True) + async for client_config in server(responder.fn): + assert client_config['url'] == 'http://127.0.0.1:22533/p1/p2' + assert 'caBundle' not in client_config + break # do not sleep + + +async def test_starts_as_http_ipv6(responder): + server = WebhookServer(addr='::1', port=22533, path='/p1/p2', insecure=True) + async for client_config in server(responder.fn): + assert client_config['url'] == 'http://[::1]:22533/p1/p2' + assert 'caBundle' not in client_config + break # do not sleep + + +async def test_unspecified_port_allocates_a_random_port(responder): + server1 = WebhookServer(addr='127.0.0.1', path='/p1/p2', insecure=True) + server2 = WebhookServer(addr='127.0.0.1', path='/p1/p2', insecure=True) + async for client_config1 in server1(responder.fn): + async for client_config2 in server2(responder.fn): + assert client_config1['url'] != client_config2['url'] + break # do not sleep + break # do not sleep + + +async def test_unspecified_addr_uses_all_interfaces(responder, caplog, assert_logs): + caplog.set_level(0) + server = WebhookServer(port=22533, path='/p1/p2', insecure=True) + async for client_config in server(responder.fn): + assert client_config['url'] == 'http://localhost:22533/p1/p2' + break # do not sleep + assert_logs([r"Listening for webhooks at http://\*:22533/p1/p2"]) + + +async def test_webhookserver_starts_as_https_with_selfsigned_cert( + responder): + server = WebhookServer(addr='127.0.0.1', port=22533, path='/p1/p2', host='somehost') + async for client_config in server(responder.fn): + assert client_config['url'] == 'https://somehost:22533/p1/p2' + assert 'caBundle' in client_config # regardless of the value + break # do not sleep + + +async def test_webhookserver_starts_as_https_with_provided_cert( + certfile, pkeyfile, certpkey, responder): + server = WebhookServer(port=22533, certfile=certfile, pkeyfile=pkeyfile) + async for client_config in server(responder.fn): + assert client_config['url'] == 
'https://localhost:22533' + assert base64.b64decode(client_config['caBundle']) == certpkey[0] + break # do not sleep + + +@pytest.mark.parametrize('cls, url', [ + (WebhookK3dServer, 'https://host.k3d.internal:22533/p1/p2'), + (WebhookMinikubeServer, 'https://host.minikube.internal:22533/p1/p2'), +]) +async def test_webhookserver_flavours_inject_hostnames( + certfile, pkeyfile, certpkey, responder, cls, url): + server = cls(port=22533, certfile=certfile, pkeyfile=pkeyfile, path='/p1/p2') + async for client_config in server(responder.fn): + assert client_config['url'] == url + break # do not sleep + + +@pytest.mark.usefixtures('no_sslproto_warnings') +async def test_webhookserver_serves( + certfile, pkeyfile, responder, adm_request): + responder.fut.set_result({'hello': 'world'}) + server = WebhookServer(certfile=certfile, pkeyfile=pkeyfile) + async for client_config in server(responder.fn): + cadata = base64.b64decode(client_config['caBundle']).decode('ascii') + sslctx = ssl.create_default_context(cadata=cadata) + async with aiohttp.ClientSession() as client: + async with client.post(client_config['url'], ssl=sslctx, json=adm_request) as resp: + text = await resp.text() + assert text == '{"hello": "world"}' + assert resp.status == 200 + break # do not sleep + + +@pytest.mark.parametrize('code, error', [ + (500, Exception), + (400, WebhookError), + (400, WebhookError), + (400, MissingDataError), + (404, UnknownResourceError), + (409, AmbiguousResourceError), + (400, lambda: json.JSONDecodeError('...', '...', 0)), +]) +@pytest.mark.usefixtures('no_sslproto_warnings') +async def test_webhookserver_errors( + certfile, pkeyfile, responder, adm_request, code, error): + responder.fut.set_exception(error()) + server = WebhookServer(certfile=certfile, pkeyfile=pkeyfile) + async for client_config in server(responder.fn): + cadata = base64.b64decode(client_config['caBundle']).decode('ascii') + sslctx = ssl.create_default_context(cadata=cadata) + async with 
aiohttp.ClientSession() as client: + async with client.post(client_config['url'], ssl=sslctx, json=adm_request) as resp: + assert resp.status == code + break # do not sleep diff --git a/tests/basic-structs/test_containers.py b/tests/basic-structs/test_memories.py similarity index 100% rename from tests/basic-structs/test_containers.py rename to tests/basic-structs/test_memories.py diff --git a/tests/conftest.py b/tests/conftest.py index 995c78c6..609a00b8 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -492,6 +492,22 @@ def any_pykube(request): else: yield from _with_module_absent('pykube') + +@pytest.fixture() +def no_pyngrok(): + yield from _with_module_absent('pyngrok') + + +@pytest.fixture() +def no_oscrypto(): + yield from _with_module_absent('oscrypto') + + +@pytest.fixture() +def no_certbuilder(): + yield from _with_module_absent('certbuilder') + + # # Helpers for the timing checks. # diff --git a/tests/invocations/test_kwargs.py b/tests/invocations/test_kwargs.py index e0a71367..fb356b9b 100644 --- a/tests/invocations/test_kwargs.py +++ b/tests/invocations/test_kwargs.py @@ -2,8 +2,9 @@ import pytest -from kopf.reactor.causation import ActivityCause, DaemonCause, ResourceChangingCause, \ - ResourceSpawningCause, ResourceWatchingCause +from kopf.reactor.causation import ActivityCause, DaemonCause, \ + ResourceChangingCause, ResourceSpawningCause, \ + ResourceWatchingCause, ResourceWebhookCause from kopf.reactor.indexing import OperatorIndexer, OperatorIndexers from kopf.reactor.invocation import build_kwargs from kopf.structs.bodies import Body, BodyEssence @@ -61,6 +62,56 @@ def test_startup_kwargs(resource, activity, indices): assert kwargs['settings'] is cause.settings +def test_resource_admission_kwargs(resource, indices): + body = {'metadata': {'uid': 'uid1', 'name': 'name1', 'namespace': 'ns1', + 'labels': {'l1': 'v1'}, 'annotations': {'a1': 'v1'}}, + 'spec': {'field': 'value'}, + 'status': {'info': 'payload'}} + cause = ResourceWebhookCause( 
+ logger=logging.getLogger('kopf.test.fake.logger'), + indices=indices, + resource=resource, + patch=Patch(), + memo=Memo(), + body=Body(body), + dryrun=False, + headers={'k1': 'v1'}, + sslpeer={'k2': 'v2'}, + userinfo={'k3': 'v3'}, + warnings=['w1'], + webhook=None, + reason=None, + operation=None, + ) + kwargs = build_kwargs(cause=cause, extrakwarg=123) + assert set(kwargs) == {'extrakwarg', 'logger', 'index1', 'index2', 'resource', + 'dryrun', 'headers', 'sslpeer', 'userinfo', 'warnings', + 'patch', 'memo', + 'body', 'spec', 'status', 'meta', 'uid', 'name', 'namespace', + 'labels', 'annotations'} + assert kwargs['extrakwarg'] == 123 + assert kwargs['resource'] is cause.resource + assert kwargs['index1'] is indices['index1'] + assert kwargs['index2'] is indices['index2'] + assert kwargs['logger'] is cause.logger + assert kwargs['dryrun'] is cause.dryrun + assert kwargs['headers'] is cause.headers + assert kwargs['sslpeer'] is cause.sslpeer + assert kwargs['userinfo'] is cause.userinfo + assert kwargs['warnings'] is cause.warnings + assert kwargs['patch'] is cause.patch + assert kwargs['memo'] is cause.memo + assert kwargs['body'] is cause.body + assert kwargs['spec'] is cause.body.spec + assert kwargs['meta'] is cause.body.metadata + assert kwargs['status'] is cause.body.status + assert kwargs['labels'] is cause.body.metadata.labels + assert kwargs['annotations'] is cause.body.metadata.annotations + assert kwargs['uid'] == cause.body.metadata.uid + assert kwargs['name'] == cause.body.metadata.name + assert kwargs['namespace'] == cause.body.metadata.namespace + + def test_resource_watching_kwargs(resource, indices): body = {'metadata': {'uid': 'uid1', 'name': 'name1', 'namespace': 'ns1', 'labels': {'l1': 'v1'}, 'annotations': {'a1': 'v1'}}, diff --git a/tests/k8s/test_creating.py b/tests/k8s/test_creating.py new file mode 100644 index 00000000..5042766f --- /dev/null +++ b/tests/k8s/test_creating.py @@ -0,0 +1,57 @@ +import aiohttp.web +import pytest + +from 
kopf.clients.creating import create_obj +from kopf.clients.errors import APIError + + +async def test_simple_body_with_arguments( + resp_mocker, aresponses, hostname, resource, namespace, caplog): + + post_mock = resp_mocker(return_value=aiohttp.web.json_response({})) + aresponses.add(hostname, resource.get_url(namespace=namespace), 'post', post_mock) + + body = {'x': 'y'} + await create_obj(resource=resource, namespace=namespace, name='name1', body=body) + + assert post_mock.called + assert post_mock.call_count == 1 + + data = post_mock.call_args_list[0][0][0].data # [callidx][args/kwargs][argidx] + if resource.namespaced: + assert data == {'x': 'y', 'metadata': {'name': 'name1', 'namespace': 'ns'}} + else: + assert data == {'x': 'y', 'metadata': {'name': 'name1'}} + + +async def test_full_body_with_identifiers( + resp_mocker, aresponses, hostname, resource, namespace, caplog): + + post_mock = resp_mocker(return_value=aiohttp.web.json_response({})) + aresponses.add(hostname, resource.get_url(namespace=namespace), 'post', post_mock) + + body = {'x': 'y', 'metadata': {'name': 'name1', 'namespace': namespace}} + await create_obj(resource=resource, body=body) + + assert post_mock.called + assert post_mock.call_count == 1 + + data = post_mock.call_args_list[0][0][0].data # [callidx][args/kwargs][argidx] + assert data == {'x': 'y', 'metadata': {'name': 'name1', 'namespace': namespace}} + + +@pytest.mark.parametrize('status', [400, 401, 403, 404, 409, 500, 666]) +async def test_raises_api_errors( + resp_mocker, aresponses, hostname, status, resource, namespace, + cluster_resource, namespaced_resource): + + post_mock = resp_mocker(return_value=aresponses.Response(status=status)) + cluster_url = cluster_resource.get_url(namespace=None) + namespaced_url = namespaced_resource.get_url(namespace='ns') + aresponses.add(hostname, cluster_url, 'post', post_mock) + aresponses.add(hostname, namespaced_url, 'post', post_mock) + + body = {'x': 'y'} + with pytest.raises(APIError) as 
e: + await create_obj(resource=resource, namespace=namespace, name='name1', body=body) + assert e.value.status == status diff --git a/tests/k8s/test_errors.py b/tests/k8s/test_errors.py index 320d8dab..dc1c7af4 100644 --- a/tests/k8s/test_errors.py +++ b/tests/k8s/test_errors.py @@ -2,8 +2,8 @@ import pytest from kopf.clients.auth import APIContext, reauthenticated_request -from kopf.clients.errors import APIError, APIForbiddenError, APINotFoundError, \ - APIUnauthorizedError, check_response +from kopf.clients.errors import APIConflictError, APIError, APIForbiddenError, \ + APINotFoundError, APIUnauthorizedError, check_response @reauthenticated_request @@ -52,6 +52,7 @@ async def test_no_error_on_success( (401, APIUnauthorizedError), (403, APIForbiddenError), (404, APINotFoundError), + (409, APIConflictError), (500, APIError), (666, APIError), ]) diff --git a/tests/observation/test_processing_of_resources.py b/tests/observation/test_processing_of_resources.py index b61352ae..2e6a94d4 100644 --- a/tests/observation/test_processing_of_resources.py +++ b/tests/observation/test_processing_of_resources.py @@ -86,6 +86,7 @@ def group1_404mock(resp_mocker, aresponses, hostname, apis_mock): @pytest.fixture(params=[ kopf.on.event, kopf.daemon, kopf.timer, kopf.index, kopf.on.resume, kopf.on.create, kopf.on.update, kopf.on.delete, + kopf.on.validate, kopf.on.mutate, ]) def handlers(request, registry): @request.param('group1', 'version1', 'plural1') diff --git a/tests/observation/test_revision_of_resources.py b/tests/observation/test_revision_of_resources.py index 9f4a3f74..5b44567a 100644 --- a/tests/observation/test_revision_of_resources.py +++ b/tests/observation/test_revision_of_resources.py @@ -10,6 +10,7 @@ @pytest.fixture(params=[ kopf.on.event, kopf.daemon, kopf.timer, kopf.index, kopf.on.resume, kopf.on.create, kopf.on.update, kopf.on.delete, + kopf.on.validate, kopf.on.mutate, ]) def handlers(request, registry): @request.param('group1', 'version1', 'plural1') @@ 
-58,6 +59,7 @@ def test_replacing_a_new_group(registry): @pytest.mark.parametrize('decorator', [ kopf.on.event, kopf.daemon, kopf.timer, kopf.index, kopf.on.resume, kopf.on.create, kopf.on.update, kopf.on.delete, + kopf.on.validate, kopf.on.mutate, ]) def test_ambiguity_in_specific_selectors(registry, decorator, caplog, assert_logs): r1 = Resource(group='g1', version='v1', plural='plural', verbs=VERBS) @@ -75,6 +77,7 @@ def fn(**_): ... @pytest.mark.parametrize('decorator', [ kopf.on.event, kopf.daemon, kopf.timer, kopf.index, kopf.on.resume, kopf.on.create, kopf.on.update, kopf.on.delete, + kopf.on.validate, kopf.on.mutate, ]) def test_corev1_overrides_ambuigity(registry, decorator, caplog, assert_logs): r1 = Resource(group='', version='v1', plural='pods', verbs=VERBS) @@ -92,6 +95,7 @@ def fn(**_): ... @pytest.mark.parametrize('decorator', [ kopf.on.event, kopf.daemon, kopf.timer, kopf.index, kopf.on.resume, kopf.on.create, kopf.on.update, kopf.on.delete, + kopf.on.validate, kopf.on.mutate, ]) def test_no_ambiguity_in_generic_selector(registry, decorator, caplog, assert_logs): r1 = Resource(group='g1', version='v1', plural='plural', verbs=VERBS) @@ -109,6 +113,7 @@ def fn(**_): ... 
@pytest.mark.parametrize('decorator', [ kopf.on.event, kopf.daemon, kopf.timer, kopf.index, kopf.on.resume, kopf.on.create, kopf.on.update, kopf.on.delete, + kopf.on.validate, kopf.on.mutate, ]) def test_selectors_with_no_resources(registry, decorator, caplog, assert_logs): r1 = Resource(group='group1', version='version1', plural='plural1', verbs=VERBS) diff --git a/tests/primitives/test_conditions.py b/tests/primitives/test_conditions.py new file mode 100644 index 00000000..e5f1e87d --- /dev/null +++ b/tests/primitives/test_conditions.py @@ -0,0 +1,48 @@ +import asyncio + +import async_timeout +import pytest + +from kopf.structs.primitives import condition_chain + + +async def test_no_triggering(): + source = asyncio.Condition() + target = asyncio.Condition() + task = asyncio.create_task(condition_chain(source, target)) + try: + + with pytest.raises(asyncio.TimeoutError): + with async_timeout.timeout(0.1) as timeout: + async with target: + await target.wait() + + assert timeout.expired + + finally: + task.cancel() + await asyncio.wait([task]) + + +async def test_triggering(event_loop, timer): + source = asyncio.Condition() + target = asyncio.Condition() + task = asyncio.create_task(condition_chain(source, target)) + try: + + async def delayed_trigger(): + async with source: + source.notify_all() + + event_loop.call_later(0.1, asyncio.create_task, delayed_trigger()) + + with timer, async_timeout.timeout(10) as timeout: + async with target: + await target.wait() + + assert not timeout.expired + assert 0.1 <= timer.seconds <= 0.2 + + finally: + task.cancel() + await asyncio.wait([task]) diff --git a/tests/primitives/test_containers.py b/tests/primitives/test_containers.py new file mode 100644 index 00000000..6a5d555b --- /dev/null +++ b/tests/primitives/test_containers.py @@ -0,0 +1,93 @@ +import asyncio + +import async_timeout +import pytest + +from kopf.structs.primitives import Container + + +async def test_empty_by_default(): + container = Container() + with 
pytest.raises(asyncio.TimeoutError): + with async_timeout.timeout(0.1) as timeout: + await container.wait() + assert timeout.expired + + +async def test_does_not_wake_up_when_reset(event_loop, timer): + container = Container() + + async def reset_it(): + await container.reset() + + event_loop.call_later(0.05, asyncio.create_task, reset_it()) + + with pytest.raises(asyncio.TimeoutError): + with async_timeout.timeout(0.1) as timeout: + await container.wait() + + assert timeout.expired + + +async def test_wakes_up_when_preset(event_loop, timer): + container = Container() + await container.set(123) + + with timer, async_timeout.timeout(10) as timeout: + result = await container.wait() + + assert not timeout.expired + assert timer.seconds <= 0.1 + assert result == 123 + + +async def test_wakes_up_when_set(event_loop, timer): + container = Container() + + async def set_it(): + await container.set(123) + + event_loop.call_later(0.1, asyncio.create_task, set_it()) + + with timer, async_timeout.timeout(10) as timeout: + result = await container.wait() + + assert not timeout.expired + assert 0.1 <= timer.seconds <= 0.2 + assert result == 123 + + +async def test_iterates_when_set(event_loop, timer): + container = Container() + + async def set_it(v): + await container.set(v) + + event_loop.call_later(0.1, asyncio.create_task, set_it(123)) + event_loop.call_later(0.2, asyncio.create_task, set_it(234)) + + values = [] + with timer, async_timeout.timeout(10) as timeout: + async for value in container.as_changed(): + values.append(value) + if value == 234: + break + + assert not timeout.expired + assert 0.2 <= timer.seconds <= 0.3 + assert values == [123, 234] + + +async def test_iterates_when_preset(event_loop, timer): + container = Container() + await container.set(123) + + values = [] + with timer, async_timeout.timeout(10) as timeout: + async for value in container.as_changed(): + values.append(value) + break + + assert not timeout.expired + assert timer.seconds <= 0.1 + 
assert values == [123] diff --git a/tests/registries/conftest.py b/tests/registries/conftest.py index 296dca8e..7910bb87 100644 --- a/tests/registries/conftest.py +++ b/tests/registries/conftest.py @@ -2,12 +2,13 @@ import pytest -from kopf.reactor.causation import ActivityCause, ResourceCause, ResourceChangingCause, \ - ResourceSpawningCause, ResourceWatchingCause +from kopf.reactor.causation import ActivityCause, ResourceCause, \ + ResourceChangingCause, ResourceSpawningCause, \ + ResourceWatchingCause, ResourceWebhookCause from kopf.reactor.indexing import OperatorIndexers from kopf.reactor.registries import ActivityRegistry, OperatorRegistry, ResourceChangingRegistry, \ ResourceRegistry, ResourceSpawningRegistry, \ - ResourceWatchingRegistry + ResourceWatchingRegistry, ResourceWebhooksRegistry from kopf.structs.bodies import Body from kopf.structs.diffs import Diff, DiffItem from kopf.structs.ephemera import Memo @@ -147,5 +148,22 @@ def make_cause( body=Body(body if body is not None else {}), reset=False, ) + if cls is ResourceWebhookCause or cls is ResourceWebhooksRegistry: + return ResourceWebhookCause( + logger=logging.getLogger('kopf.test.fake.logger'), + indices=OperatorIndexers().indices, + resource=resource, + patch=Patch(), + memo=Memo(), + body=Body(body if body is not None else {}), + dryrun=False, + sslpeer={}, + headers={}, + userinfo={}, + warnings=[], + reason=None, + webhook=None, + operation=None, + ) raise TypeError(f"Cause/registry type {cls} is not supported by this fixture.") return make_cause diff --git a/tests/registries/test_decorators.py b/tests/registries/test_decorators.py index fa979465..1b752cb8 100644 --- a/tests/registries/test_decorators.py +++ b/tests/registries/test_decorators.py @@ -1,6 +1,7 @@ import pytest import kopf +from kopf.reactor.causation import ResourceWebhookCause from kopf.reactor.handling import handler_var, subregistry_var from kopf.reactor.invocation import context from kopf.reactor.registries import 
OperatorRegistry, ResourceChangingRegistry @@ -492,6 +493,8 @@ def fn(**_): @pytest.mark.parametrize('decorator, kwargs', [ (kopf.index, {}), (kopf.on.event, {}), + (kopf.on.mutate, {}), + (kopf.on.validate, {}), (kopf.on.resume, {}), (kopf.on.create, {}), (kopf.on.update, {}), @@ -509,6 +512,8 @@ def fn(**_): @pytest.mark.parametrize('decorator, kwargs', [ (kopf.index, {}), (kopf.on.event, {}), + (kopf.on.mutate, {}), + (kopf.on.validate, {}), (kopf.on.resume, {}), (kopf.on.create, {}), (kopf.on.update, {}), @@ -526,6 +531,8 @@ def fn(**_): @pytest.mark.parametrize('decorator, causeargs, handlers_prop', [ pytest.param(kopf.index, dict(), '_resource_indexing', id='on-index'), pytest.param(kopf.on.event, dict(), '_resource_watching', id='on-event'), + pytest.param(kopf.on.mutate, dict(cls=ResourceWebhookCause), '_resource_webhooks', id='on-mutation'), + pytest.param(kopf.on.validate, dict(cls=ResourceWebhookCause), '_resource_webhooks', id='on-validation'), pytest.param(kopf.on.resume, dict(reason=None, initial=True), '_resource_changing', id='on-resume'), pytest.param(kopf.on.create, dict(reason=Reason.CREATE), '_resource_changing', id='on-create'), pytest.param(kopf.on.update, dict(reason=Reason.UPDATE), '_resource_changing', id='on-update'), @@ -575,6 +582,8 @@ def fn(**_): @pytest.mark.parametrize('decorator', [ pytest.param(kopf.index, id='on-index'), pytest.param(kopf.on.event, id='on-event'), + pytest.param(kopf.on.mutate, id='on-mutation'), + pytest.param(kopf.on.validate, id='on-validation'), pytest.param(kopf.on.resume, id='on-resume'), pytest.param(kopf.on.create, id='on-create'), pytest.param(kopf.on.update, id='on-update'), diff --git a/tests/settings/test_defaults.py b/tests/settings/test_defaults.py index c95d507c..3c64997d 100644 --- a/tests/settings/test_defaults.py +++ b/tests/settings/test_defaults.py @@ -24,6 +24,8 @@ async def test_declared_public_interface_and_promised_defaults(): assert settings.batching.batch_window == 0.1 assert 
settings.batching.error_delays == (1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610) assert settings.scanning.disabled == False + assert settings.admission.server is None + assert settings.admission.managed is None assert settings.execution.executor is not None assert settings.execution.max_workers is None