diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index 5e44d22c4..16b9144f7 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -20,7 +20,10 @@ "ghcr.io/devcontainers/features/kubectl-helm-minikube:1": { "minikube": "none" }, - "ghcr.io/eitsupi/devcontainer-features/jq-likes:2": {}, + "ghcr.io/eitsupi/devcontainer-features/jq-likes:2": { + "jqVersion": "latest", + "yqVersion": "latest" + }, "ghcr.io/dhoeric/features/k9s:1": {}, "ghcr.io/EliiseS/devcontainer-features/bash-profile:1": { "command": "alias k=kubectl" diff --git a/.devcontainer/docker-compose.yml b/.devcontainer/docker-compose.yml index 08dca9ca0..1dc6a27a1 100644 --- a/.devcontainer/docker-compose.yml +++ b/.devcontainer/docker-compose.yml @@ -23,6 +23,8 @@ services: ZED_TOKEN: renku ZED_INSECURE: "true" POETRY_CACHE_DIR: "/poetry_cache" + NB_SERVER_OPTIONS__DEFAULTS_PATH: /workspace/server_defaults.json + NB_SERVER_OPTIONS__UI_CHOICES_PATH: /workspace/server_options.json network_mode: service:db depends_on: - db @@ -43,6 +45,7 @@ services: - "8080:8080" - "5678:5678" - "50051:50051" + - "8888:80" swagger: image: swaggerapi/swagger-ui diff --git a/.github/workflows/acceptance-tests.yml b/.github/workflows/acceptance-tests.yml index e3b14c4e4..b254c5b89 100644 --- a/.github/workflows/acceptance-tests.yml +++ b/.github/workflows/acceptance-tests.yml @@ -25,6 +25,8 @@ jobs: renku-graph: ${{ steps.deploy-comment.outputs.renku-graph}} renku-notebooks: ${{ steps.deploy-comment.outputs.renku-notebooks}} renku-ui: ${{ steps.deploy-comment.outputs.renku-ui}} + amalthea-sessions: ${{ steps.deploy-comment.outputs.amalthea-sessions}} + amalthea: ${{ steps.deploy-comment.outputs.amalthea}} test-enabled: ${{ steps.deploy-comment.outputs.test-enabled}} test-cypress-enabled: ${{ steps.deploy-comment.outputs.test-cypress-enabled}} persist: ${{ steps.deploy-comment.outputs.persist}} @@ -84,6 +86,8 @@ jobs: renku_graph: "${{ needs.check-deploy.outputs.renku-graph }}" renku_notebooks: "${{ needs.check-deploy.outputs.renku-notebooks }}" renku_data_services: "@${{ github.head_ref }}" + amalthea: "${{ needs.check-deploy.outputs.amalthea }}" + amalthea_sessions: "${{ needs.check-deploy.outputs.amalthea-sessions }}" extra_values: "${{ needs.check-deploy.outputs.extra-values }}" selenium-acceptance-tests: diff --git a/.github/workflows/save_cache.yml b/.github/workflows/save_cache.yml new file mode 100644 index 000000000..1230d6b58 --- /dev/null +++ b/.github/workflows/save_cache.yml @@ -0,0 +1,41 @@ +name: Create cache from commits on main + +on: + push: + branches: + - main + - chore-add-kind + workflow_dispatch: + + +jobs: + save-poetry-cache: + runs-on: ubuntu-latest + env: + CACHE_KEY: main-branch-poetry-cache-ubuntu + CACHE_PATH: .devcontainer/.poetry_cache + DEVCONTAINER_IMAGE_CACHE: ghcr.io/swissdatasciencecenter/renku-data-services/devcontainer + + steps: + - uses: actions/checkout@v3 + with: + fetch-depth: 0 + - name: Login to Docker Hub + uses: docker/login-action@v2 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + - name: Install python deps + uses: devcontainers/ci@v0.3 + with: + runCmd: poetry install --with dev + push: always + skipContainerUserIdUpdate: false + imageName: ${{ env.DEVCONTAINER_IMAGE_CACHE }} + cacheFrom: ${{ env.DEVCONTAINER_IMAGE_CACHE }} + - uses: actions/cache/save@v3 + name: Create cache + with: + path: ${{ env.CACHE_PATH }} + key: ${{ env.CACHE_KEY }} diff --git a/.github/workflows/test_publish.yml b/.github/workflows/test_publish.yml index e34758384..3e3c16bd8 100644 --- a/.github/workflows/test_publish.yml +++ b/.github/workflows/test_publish.yml @@ -70,6 +70,11 @@ jobs: - uses: actions/checkout@v4 with: fetch-depth: 0 + - uses: actions/cache/restore@v3 + name: Restore cache + with: + path: ${{ env.CACHE_PATH }} + key: ${{ env.CACHE_KEY }} - name: Set Git config shell: bash run: | @@ -111,6 +116,11 @@ jobs: - uses: actions/checkout@v4 with: fetch-depth: 0 + - uses: actions/cache/restore@v3 + name: Restore cache + with: + path: ${{ env.CACHE_PATH }} + key: ${{ env.CACHE_KEY }} - name: Set Git config shell: bash run: | @@ -155,6 +165,11 @@ jobs: - uses: actions/checkout@v4 with: fetch-depth: 0 + - uses: actions/cache/restore@v3 + name: Restore cache + with: + path: ${{ env.CACHE_PATH }} + key: ${{ env.CACHE_KEY }} - name: Set Git config shell: bash run: | diff --git a/Makefile b/Makefile index 5b453b0a7..3f220d2e0 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ .PHONY: schemas tests test_setup main_tests schemathesis_tests collect_coverage style_checks pre_commit_checks run download_avro check_avro avro_models update_avro kind_cluster install_amaltheas all -AMALTHEA_JS_VERSION ?= 0.11.0 -AMALTHEA_SESSIONS_VERSION ?= 0.0.1-new-operator-chart +AMALTHEA_JS_VERSION ?= 0.12.2 +AMALTHEA_SESSIONS_VERSION ?= 0.0.9-new-operator-chart codegen_params = --input-file-type openapi --output-model-type pydantic_v2.BaseModel --use-double-quotes --target-python-version 3.12 --collapse-root-models --field-constraints --strict-nullable --set-default-enum-member --openapi-scopes schemas paths parameters --set-default-enum-member --use-one-literal-as-default --use-default define test_apispec_up_to_date @@ -153,7 +153,13 @@ kind_cluster: ## Creates a kind cluster for testing sleep 15 kubectl wait --namespace ingress-nginx --for=condition=ready pod --selector=app.kubernetes.io/component=controller --timeout=90s -install_amaltheas: ## Installs both version of amalthea in the currently active k8s context. +install_amaltheas: ## Installs both version of amalthea in the. NOTE: It uses the currently active k8s context. helm repo add renku https://swissdatasciencecenter.github.io/helm-charts - helm install amalthea-js renku/amalthea --version $(AMALTHEA_JS_VERSION) - helm install amalthea-sessions renku/amalthea-sessions --version $(AMALTHEA_SESSIONS_VERSION) + helm repo update + helm upgrade --install amalthea-js renku/amalthea --version $(AMALTHEA_JS_VERSION) + helm upgrade --install amalthea-sessions amalthea-sessions-0.0.9-new-operator-chart.tgz --version $(AMALTHEA_SESSIONS_VERSION) + +# TODO: Add the version variables from the top of the file here when the charts are fully published +amalthea_schema: ## Updates generates pydantic classes from CRDs + curl https://raw.githubusercontent.com/SwissDataScienceCenter/amalthea/feat-add-cloud-storage/config/crd/bases/amalthea.dev_amaltheasessions.yaml | yq '.spec.versions[0].schema.openAPIV3Schema' | poetry run datamodel-codegen --input-file-type jsonschema --output-model-type pydantic_v2.BaseModel --output components/renku_data_services/notebooks/cr_amalthea_session.py --use-double-quotes --target-python-version 3.12 --collapse-root-models --field-constraints --strict-nullable --base-class renku_data_services.notebooks.cr_base.BaseCRD --allow-extra-fields --use-default-kwarg + curl https://raw.githubusercontent.com/SwissDataScienceCenter/amalthea/main/controller/crds/jupyter_server.yaml | yq '.spec.versions[0].schema.openAPIV3Schema' | poetry run datamodel-codegen --input-file-type jsonschema --output-model-type pydantic_v2.BaseModel --output components/renku_data_services/notebooks/cr_jupyter_server.py --use-double-quotes --target-python-version 3.12 --collapse-root-models --field-constraints --strict-nullable --base-class renku_data_services.notebooks.cr_base.BaseCRD --allow-extra-fields --use-default-kwarg diff --git a/bases/renku_data_services/data_api/app.py b/bases/renku_data_services/data_api/app.py index 2765aa542..aa3cfd116 100644 --- a/bases/renku_data_services/data_api/app.py +++ b/bases/renku_data_services/data_api/app.py @@ -16,6 +16,7 @@ UserResourcePoolsBP, ) from renku_data_services.namespace.blueprints import GroupsBP +from renku_data_services.notebooks.blueprints import NotebooksBP, NotebooksNewBP from renku_data_services.platform.blueprints import PlatformConfigBP from renku_data_services.project.blueprints import ProjectsBP from renku_data_services.repositories.blueprints import RepositoriesBP @@ -134,6 +135,25 @@ def register_all_handlers(app: Sanic, config: Config) -> Sanic: authenticator=config.authenticator, internal_gitlab_authenticator=config.gitlab_authenticator, ) + notebooks = NotebooksBP( + name="notebooks_old", + url_prefix=url_prefix, + authenticator=config.authenticator, + nb_config=config.nb_config, + internal_gitlab_authenticator=config.gitlab_authenticator, + git_repo=config.git_repositories_repo, + ) + notebooks_new = NotebooksNewBP( + name="notebooks", + url_prefix=url_prefix, + authenticator=config.authenticator, + nb_config=config.nb_config, + project_repo=config.project_repo, + session_repo=config.session_repo, + storage_repo=config.storage_v2_repo, + rp_repo=config.rp_repo, + internal_gitlab_authenticator=config.gitlab_authenticator, + ) platform_config = PlatformConfigBP( name="platform_config", url_prefix=url_prefix, @@ -161,6 +181,8 @@ def register_all_handlers(app: Sanic, config: Config) -> Sanic: oauth2_clients.blueprint(), oauth2_connections.blueprint(), repositories.blueprint(), + notebooks.blueprint(), + notebooks_new.blueprint(), platform_config.blueprint(), ] ) diff --git a/components/renku_data_services/app_config/config.py b/components/renku_data_services/app_config/config.py index 7e988ab2c..fa2873eff 100644 --- a/components/renku_data_services/app_config/config.py +++ b/components/renku_data_services/app_config/config.py @@ -52,6 +52,7 @@ from renku_data_services.message_queue.interface import IMessageQueue from renku_data_services.message_queue.redis_queue import RedisQueue from renku_data_services.namespace.db import GroupRepository +from renku_data_services.notebooks.config import _NotebooksConfig from renku_data_services.platform.db import PlatformRepository from renku_data_services.project.db import ProjectMemberRepository, ProjectRepository from renku_data_services.repositories.db import GitRepositoriesRepository @@ -144,6 +145,7 @@ class Config: kc_api: IKeycloakAPI message_queue: IMessageQueue gitlab_url: str | None + nb_config: _NotebooksConfig secrets_service_public_key: rsa.RSAPublicKey """The public key of the secrets service, used to encrypt user secrets that only it can decrypt.""" @@ -208,6 +210,10 @@ def __post_init__(self) -> None: with open(spec_file) as f: repositories = safe_load(f) + spec_file = Path(renku_data_services.notebooks.__file__).resolve().parent / "api.spec.yaml" + with open(spec_file) as f: + repositories = safe_load(f) + spec_file = Path(renku_data_services.platform.__file__).resolve().parent / "api.spec.yaml" with open(spec_file) as f: platform = safe_load(f) @@ -408,8 +414,8 @@ def from_env(cls, prefix: str = "") -> "Config": gitlab_client: base_models.GitlabAPIProtocol user_preferences_config: UserPreferencesConfig version = os.environ.get(f"{prefix}VERSION", "0.0.1") - server_options_file = os.environ.get("SERVER_OPTIONS") - server_defaults_file = os.environ.get("SERVER_DEFAULTS") + server_options_file = os.environ.get("NB_SERVER_OPTIONS__UI_CHOICES_PATH") + server_defaults_file = os.environ.get("NB_SERVER_OPTIONS__DEFAULTS_PATH") k8s_namespace = os.environ.get("K8S_NAMESPACE", "default") max_pinned_projects = int(os.environ.get(f"{prefix}MAX_PINNED_PROJECTS", "10")) user_preferences_config = UserPreferencesConfig(max_pinned_projects=max_pinned_projects) @@ -491,6 +497,7 @@ def from_env(cls, prefix: str = "") -> "Config": sentry = SentryConfig.from_env(prefix) trusted_proxies = TrustedProxiesConfig.from_env(prefix) message_queue = RedisQueue(redis) + nb_config = _NotebooksConfig.from_env(db) return cls( version=version, @@ -511,4 +518,5 @@ def from_env(cls, prefix: str = "") -> "Config": encryption_key=encryption_key, secrets_service_public_key=secrets_service_public_key, gitlab_url=gitlab_url, + nb_config=nb_config, ) diff --git a/components/renku_data_services/authn/dummy.py b/components/renku_data_services/authn/dummy.py index 6da7b09b9..d10acedb0 100644 --- a/components/renku_data_services/authn/dummy.py +++ b/components/renku_data_services/authn/dummy.py @@ -7,6 +7,7 @@ from typing import Optional from sanic import Request +from ulid import ULID import renku_data_services.base_models as base_models @@ -39,10 +40,22 @@ class DummyAuthenticator: """ token_field = "Authorization" # nosec: B105 + anon_id_header_key: str = "Renku-Auth-Anon-Id" + anon_id_cookie_name: str = "Renku-Auth-Anon-Id" - @staticmethod - async def authenticate(access_token: str, request: Request) -> base_models.APIUser: + async def authenticate(self, access_token: str, request: Request) -> base_models.APIUser: """Indicates whether the user has successfully logged in.""" + access_token = request.headers.get(self.token_field) or "" + if not access_token or len(access_token) == 0: + # Try to get an anonymous user ID if the validation of keycloak credentials failed + anon_id = request.headers.get(self.anon_id_header_key) + if anon_id is None: + anon_id = request.cookies.get(self.anon_id_cookie_name) + if anon_id is None: + anon_id = f"anon-{str(ULID())}" + return base_models.AnonymousAPIUser(id=str(anon_id)) + + access_token = access_token.removeprefix("Bearer ").removeprefix("bearer ") user_props = {} with contextlib.suppress(Exception): user_props = json.loads(access_token) @@ -64,4 +77,5 @@ async def authenticate(access_token: str, request: Request) -> base_models.APIUs last_name=user_props.get("last_name", "Doe") if is_set else None, email=user_props.get("email", "john.doe@gmail.com") if is_set else None, full_name=user_props.get("full_name", "John Doe") if is_set else None, + refresh_token=request.headers.get("Renku-Auth-Refresh-Token"), ) diff --git a/components/renku_data_services/authn/gitlab.py b/components/renku_data_services/authn/gitlab.py index 6b8436a17..2a7d4cc83 100644 --- a/components/renku_data_services/authn/gitlab.py +++ b/components/renku_data_services/authn/gitlab.py @@ -2,10 +2,13 @@ import contextlib import urllib.parse as parse +from contextlib import suppress from dataclasses import dataclass +from datetime import datetime import gitlab from sanic import Request +from sanic.compat import Header import renku_data_services.base_models as base_models from renku_data_services import errors @@ -23,6 +26,7 @@ class GitlabAuthenticator: gitlab_url: str token_field: str = "Gitlab-Access-Token" + expires_at_field: str = "Gitlab-Access-Token-Expires-At" def __post_init__(self) -> None: """Properly set gitlab url.""" @@ -36,10 +40,10 @@ async def authenticate(self, access_token: str, request: Request) -> base_models if self.token_field != "Authorization": # nosec: B105 access_token = str(request.headers.get(self.token_field)) - result = await self._get_gitlab_api_user(access_token) + result = await self._get_gitlab_api_user(access_token, request.headers) return result - async def _get_gitlab_api_user(self, access_token: str) -> base_models.APIUser: + async def _get_gitlab_api_user(self, access_token: str, headers: Header) -> base_models.APIUser: """Get and validate a Gitlab API User.""" client = gitlab.Gitlab(self.gitlab_url, oauth_token=access_token) try: @@ -69,12 +73,18 @@ async def _get_gitlab_api_user(self, access_token: str) -> base_models.APIUser: if len(name_parts) >= 1: last_name = " ".join(name_parts) + expires_at: datetime | None = None + expires_at_raw: str | None = headers.get(self.expires_at_field) + if expires_at_raw is not None and len(expires_at_raw) > 0: + with suppress(ValueError): + expires_at = datetime.fromtimestamp(float(expires_at_raw)) + return base_models.APIUser( - is_admin=False, id=str(user_id), access_token=access_token, first_name=first_name, last_name=last_name, email=email, full_name=full_name, + access_token_expires_at=expires_at, ) diff --git a/components/renku_data_services/authn/keycloak.py b/components/renku_data_services/authn/keycloak.py index 9c71fc728..bdca34708 100644 --- a/components/renku_data_services/authn/keycloak.py +++ b/components/renku_data_services/authn/keycloak.py @@ -1,15 +1,19 @@ """Keycloak user store.""" +from contextlib import suppress from dataclasses import dataclass +from datetime import datetime from typing import Any, Optional, cast import httpx import jwt from jwt import PyJWKClient from sanic import Request +from ulid import ULID import renku_data_services.base_models as base_models from renku_data_services import errors +from renku_data_services.base_models.core import Authenticator from renku_data_services.utils.core import get_ssl_context @@ -34,44 +38,83 @@ async def get_user_by_id(self, id: str, access_token: str) -> Optional[base_mode @dataclass -class KeycloakAuthenticator: +class KeycloakAuthenticator(Authenticator): """Authenticator for JWT access tokens from Keycloak.""" jwks: PyJWKClient algorithms: list[str] admin_role: str = "renku-admin" token_field: str = "Authorization" + refresh_token_header: str = "Renku-Auth-Refresh-Token" + anon_id_header_key: str = "Renku-Auth-Anon-Id" + anon_id_cookie_name: str = "Renku-Auth-Anon-Id" def __post_init__(self) -> None: if len(self.algorithms) == 0: raise errors.ConfigurationError(message="At least one algorithm for token validation has to be specified.") def _validate(self, token: str) -> dict[str, Any]: - sk = self.jwks.get_signing_key_from_jwt(token) - return cast( - dict[str, Any], - jwt.decode( - token, - key=sk.key, - algorithms=self.algorithms, - audience=["renku", "renku-ui", "renku-cli", "swagger"], - verify=True, - ), - ) - - async def authenticate(self, access_token: str, request: Request) -> base_models.APIUser: + try: + sk = self.jwks.get_signing_key_from_jwt(token) + return cast( + dict[str, Any], + jwt.decode( + token, + key=sk.key, + algorithms=self.algorithms, + audience=["renku", "renku-ui", "renku-cli", "swagger"], + verify=True, + ), + ) + except (jwt.InvalidSignatureError, jwt.MissingRequiredClaimError): + # NOTE: the above errors are subclasses of `InvalidToken` below but they will result from keycloak + # misconfiguration most often rather than from the user having done something so we surface them. + raise + except jwt.InvalidTokenError: + raise errors.UnauthorizedError( + message="Your credentials are invalid or expired, please log in again.", quiet=True + ) + + async def authenticate( + self, access_token: str, request: Request + ) -> base_models.AuthenticatedAPIUser | base_models.AnonymousAPIUser: """Checks the validity of the access token.""" - if self.token_field != "Authorization": # nosec: B105 - access_token = str(request.headers.get(self.token_field)) - - parsed = self._validate(access_token) - is_admin = self.admin_role in parsed.get("realm_access", {}).get("roles", []) - return base_models.APIUser( - is_admin=is_admin, - id=parsed.get("sub"), - access_token=access_token, - full_name=parsed.get("name"), - first_name=parsed.get("given_name"), - last_name=parsed.get("family_name"), - email=parsed.get("email"), - ) + header_value = str(request.headers.get(self.token_field)) + refresh_token = request.headers.get(self.refresh_token_header) + user: base_models.AuthenticatedAPIUser | base_models.AnonymousAPIUser | None = None + + # Try to get the authorization header for a fully authenticated user + with suppress(errors.UnauthorizedError, jwt.InvalidTokenError): + token = str(header_value).removeprefix("Bearer ").removeprefix("bearer ") + parsed = self._validate(token) + is_admin = self.admin_role in parsed.get("realm_access", {}).get("roles", []) + exp = parsed.get("exp") + id = parsed.get("sub") + email = parsed.get("email") + if id is None or email is None: + raise errors.UnauthorizedError( + message="Your credentials are invalid or expired, please log in again.", quiet=True + ) + user = base_models.AuthenticatedAPIUser( + is_admin=is_admin, + id=id, + access_token=access_token, + full_name=parsed.get("name"), + first_name=parsed.get("given_name"), + last_name=parsed.get("family_name"), + email=email, + refresh_token=str(refresh_token) if refresh_token else None, + access_token_expires_at=datetime.fromtimestamp(exp) if exp is not None else None, + ) + if user is not None: + return user + + # Try to get an anonymous user ID if the validation of keycloak credentials failed + anon_id = request.headers.get(self.anon_id_header_key) + if anon_id is None: + anon_id = request.cookies.get(self.anon_id_cookie_name) + if anon_id is None: + anon_id = f"anon-{str(ULID())}" + user = base_models.AnonymousAPIUser(id=str(anon_id)) + + return user diff --git a/components/renku_data_services/authz/schemas.py b/components/renku_data_services/authz/schemas.py index e43cd1b6e..07b7eb98e 100644 --- a/components/renku_data_services/authz/schemas.py +++ b/components/renku_data_services/authz/schemas.py @@ -45,7 +45,7 @@ def _write_to_db( output.append(res) case _: raise errors.ProgrammingError( - message=f"Found an uknown authorization migration type {type(request)}" + message=f"Found an unknown authorization migration type {type(request)}" ) return output diff --git a/components/renku_data_services/base_api/auth.py b/components/renku_data_services/base_api/auth.py index a16825550..f69a8b97b 100644 --- a/components/renku_data_services/base_api/auth.py +++ b/components/renku_data_services/base_api/auth.py @@ -1,14 +1,15 @@ """Authentication decorators for Sanic.""" +import asyncio import re -from collections.abc import Awaitable, Callable, Coroutine +from collections.abc import Callable, Coroutine from functools import wraps from typing import Any, Concatenate, ParamSpec, TypeVar, cast from sanic import Request from renku_data_services import errors -from renku_data_services.base_models import APIUser, Authenticator +from renku_data_services.base_models import AnyAPIUser, APIUser, Authenticator _T = TypeVar("_T") _P = ParamSpec("_P") @@ -17,7 +18,7 @@ def authenticate( authenticator: Authenticator, ) -> Callable[ - [Callable[Concatenate[Request, APIUser, _P], Awaitable[_T]]], + [Callable[Concatenate[Request, AnyAPIUser, _P], Coroutine[Any, Any, _T]]], Callable[Concatenate[Request, _P], Coroutine[Any, Any, _T]], ]: """Decorator for a Sanic handler that adds the APIUser model to the context. @@ -26,16 +27,12 @@ def authenticate( """ def decorator( - f: Callable[Concatenate[Request, APIUser, _P], Awaitable[_T]], + f: Callable[Concatenate[Request, AnyAPIUser, _P], Coroutine[Any, Any, _T]], ) -> Callable[Concatenate[Request, _P], Coroutine[Any, Any, _T]]: @wraps(f) async def decorated_function(request: Request, *args: _P.args, **kwargs: _P.kwargs) -> _T: token = request.headers.get(authenticator.token_field) - user = APIUser() - if token is not None and len(token) >= 8: - token = token.removeprefix("Bearer ").removeprefix("bearer ") - user = await authenticator.authenticate(token, request) - + user = await authenticator.authenticate(token or "", request) response = await f(request, user, *args, **kwargs) return response @@ -44,9 +41,39 @@ async def decorated_function(request: Request, *args: _P.args, **kwargs: _P.kwar return decorator +def authenticate_2( + authenticator1: Authenticator, + authenticator2: Authenticator, +) -> Callable[ + [Callable[Concatenate[Request, AnyAPIUser, AnyAPIUser, _P], Coroutine[Any, Any, _T]]], + Callable[Concatenate[Request, _P], Coroutine[Any, Any, _T]], +]: + """Decorator for a Sanic handler that adds the APIUser when another authentication has already been done.""" + + def decorator( + f: Callable[Concatenate[Request, AnyAPIUser, AnyAPIUser, _P], Coroutine[Any, Any, _T]], + ) -> Callable[Concatenate[Request, _P], Coroutine[Any, Any, _T]]: + @wraps(f) + async def decorated_function(request: Request, *args: _P.args, **kwargs: _P.kwargs) -> _T: + token1 = request.headers.get(authenticator1.token_field) + token2 = request.headers.get(authenticator2.token_field) + user1: AnyAPIUser + user2: AnyAPIUser + [user1, user2] = await asyncio.gather( + authenticator1.authenticate(token1 or "", request), + authenticator2.authenticate(token2 or "", request), + ) + response = await f(request, user1, user2, *args, **kwargs) + return response + + return decorated_function + + return decorator + + def validate_path_project_id( - f: Callable[Concatenate[Request, _P], Awaitable[_T]], -) -> Callable[Concatenate[Request, _P], Awaitable[_T]]: + f: Callable[Concatenate[Request, _P], Coroutine[Any, Any, _T]], +) -> Callable[Concatenate[Request, _P], Coroutine[Any, Any, _T]]: """Decorator for a Sanic handler that validates the project_id path parameter.""" _path_project_id_regex = re.compile(r"^[A-Za-z0-9]{26}$") @@ -69,8 +96,8 @@ async def decorated_function(request: Request, *args: _P.args, **kwargs: _P.kwar def validate_path_user_id( - f: Callable[Concatenate[Request, _P], Awaitable[_T]], -) -> Callable[Concatenate[Request, _P], Awaitable[_T]]: + f: Callable[Concatenate[Request, _P], Coroutine[Any, Any, _T]], +) -> Callable[Concatenate[Request, _P], Coroutine[Any, Any, _T]]: """Decorator for a Sanic handler that validates the user_id or member_id path parameter.""" _path_user_id_regex = re.compile(r"^[A-Za-z0-9]{1}[A-Za-z0-9-]+$") @@ -102,8 +129,8 @@ async def decorated_function(request: Request, *args: _P.args, **kwargs: _P.kwar def only_admins( - f: Callable[Concatenate[Request, APIUser, _P], Awaitable[_T]], -) -> Callable[Concatenate[Request, APIUser, _P], Awaitable[_T]]: + f: Callable[Concatenate[Request, APIUser, _P], Coroutine[Any, Any, _T]], +) -> Callable[Concatenate[Request, APIUser, _P], Coroutine[Any, Any, _T]]: """Decorator for a Sanic handler that errors out if the user is not an admin.""" @wraps(f) @@ -122,7 +149,7 @@ async def decorated_function(request: Request, user: APIUser, *args: _P.args, ** return decorated_function -def only_authenticated(f: Callable[_P, Awaitable[_T]]) -> Callable[_P, Awaitable[_T]]: +def only_authenticated(f: Callable[_P, Coroutine[Any, Any, _T]]) -> Callable[_P, Coroutine[Any, Any, _T]]: """Decorator that errors out if the user is not authenticated. It looks for APIUser in the named or unnamed parameters. diff --git a/components/renku_data_services/base_api/blueprint.py b/components/renku_data_services/base_api/blueprint.py index f72fcdd16..3a76882c4 100644 --- a/components/renku_data_services/base_api/blueprint.py +++ b/components/renku_data_services/base_api/blueprint.py @@ -3,7 +3,7 @@ from collections.abc import Callable from dataclasses import dataclass, field from inspect import getmembers, ismethod -from typing import Optional, cast +from typing import cast from sanic import Blueprint from sanic.models.handler_types import RequestMiddlewareType, ResponseMiddlewareType, RouteHandler @@ -21,7 +21,7 @@ class CustomBlueprint: """ name: str - url_prefix: Optional[str] = None + url_prefix: str request_middlewares: list[RequestMiddlewareType] = field(default_factory=list, repr=False) response_middlewares: list[ResponseMiddlewareType] = field(default_factory=list, repr=False) diff --git a/components/renku_data_services/base_api/etag.py b/components/renku_data_services/base_api/etag.py index 7becd15ce..575869e4d 100644 --- a/components/renku_data_services/base_api/etag.py +++ b/components/renku_data_services/base_api/etag.py @@ -1,6 +1,6 @@ """Enitity tag decorators for Sanic.""" -from collections.abc import Awaitable, Callable, Coroutine +from collections.abc import Callable, Coroutine from functools import wraps from typing import Any, Concatenate, ParamSpec, TypeVar @@ -13,8 +13,8 @@ def if_match_required( - f: Callable[Concatenate[Request, _P], Awaitable[_T]], -) -> Callable[Concatenate[Request, _P], Awaitable[_T]]: + f: Callable[Concatenate[Request, _P], Coroutine[Any, Any, _T]], +) -> Callable[Concatenate[Request, _P], Coroutine[Any, Any, _T]]: """Decorator that errors out if the "If-Match" header is not present.""" @wraps(f) @@ -31,7 +31,7 @@ async def decorated_function(request: Request, *args: _P.args, **kwargs: _P.kwar def extract_if_none_match( - f: Callable[Concatenate[Request, _P], Awaitable[_T]], + f: Callable[Concatenate[Request, _P], Coroutine[Any, Any, _T]], ) -> Callable[Concatenate[Request, _P], Coroutine[Any, Any, _T]]: """Decorator which extracts the "If-None-Match" header if present.""" diff --git a/components/renku_data_services/base_api/pagination.py b/components/renku_data_services/base_api/pagination.py index 4a5b0b8be..7438283a3 100644 --- a/components/renku_data_services/base_api/pagination.py +++ b/components/renku_data_services/base_api/pagination.py @@ -1,6 +1,6 @@ """Classes and decorators used for paginating long responses.""" -from collections.abc import Awaitable, Callable, Sequence +from collections.abc import Callable, Coroutine, Sequence from functools import wraps from math import ceil from typing import Any, Concatenate, NamedTuple, ParamSpec, cast @@ -57,8 +57,8 @@ def as_header(self) -> dict[str, str]: def paginate( - f: Callable[Concatenate[Request, _P], Awaitable[tuple[Sequence[Any], int]]], -) -> Callable[Concatenate[Request, _P], Awaitable[JSONResponse]]: + f: Callable[Concatenate[Request, _P], Coroutine[Any, Any, tuple[Sequence[Any], int]]], +) -> Callable[Concatenate[Request, _P], Coroutine[Any, Any, JSONResponse]]: """Serializes the response to JSON and adds the required pagination headers to the response. The handler should return first the list of items and then the total count from the DB. diff --git a/components/renku_data_services/base_models/core.py b/components/renku_data_services/base_models/core.py index b0143d238..1f6a76cbb 100644 --- a/components/renku_data_services/base_models/core.py +++ b/components/renku_data_services/base_models/core.py @@ -3,41 +3,67 @@ import re import unicodedata from dataclasses import dataclass, field +from datetime import datetime from enum import Enum, StrEnum -from typing import ClassVar, Optional, Protocol, Self +from typing import ClassVar, Optional, Protocol, Self, TypeVar from sanic import Request from renku_data_services.errors import errors -class Authenticator(Protocol): - """Interface for authenticating users.""" - - token_field: str - - async def authenticate(self, access_token: str, request: Request) -> "APIUser": - """Validates the user credentials (i.e. we can say that the user is a valid Renku user).""" - ... - - -@dataclass(kw_only=True) +@dataclass(kw_only=True, frozen=True) class APIUser: """The model for a user of the API, used for authentication.""" + id: str | None = None # the sub claim in the access token - i.e. the Keycloak user ID + access_token: str | None = field(repr=False, default=None) + refresh_token: str | None = field(repr=False, default=None) + full_name: str | None = None + first_name: str | None = None + last_name: str | None = None + email: str | None = None + access_token_expires_at: datetime | None = None is_admin: bool = False - id: Optional[str] = None # the sub claim in the access token - i.e. the Keycloak user ID - access_token: Optional[str] = field(repr=False, default=None) - full_name: Optional[str] = None - first_name: Optional[str] = None - last_name: Optional[str] = None - email: Optional[str] = None @property def is_authenticated(self) -> bool: """Indicates whether the user has successfully logged in.""" return self.id is not None + def get_full_name(self) -> str | None: + """Generate the closest thing to a full name if the full name field is not set.""" + full_name = self.full_name or " ".join(filter(None, (self.first_name, self.last_name))) + if len(full_name) == 0: + return None + return full_name + + +@dataclass(kw_only=True, frozen=True) +class AuthenticatedAPIUser(APIUser): + """The model for a an authenticated user of the API.""" + + id: str + email: str + access_token: str = field(repr=False) + refresh_token: str | None = field(default=None, repr=False) + full_name: str | None = None + first_name: str | None = None + last_name: str | None = None + + +@dataclass(kw_only=True, frozen=True) +class AnonymousAPIUser(APIUser): + """The model for an anonymous user of the API.""" + + id: str + is_admin: bool = field(init=False, default=False) + + @property + def is_authenticated(self) -> bool: + """We cannot authenticate anonymous users, so this is by definition False.""" + return False + class ServiceAdminId(StrEnum): """Types of internal service admins.""" @@ -46,18 +72,22 @@ class ServiceAdminId(StrEnum): secrets_rotation = "secrets_rotation" -@dataclass(kw_only=True) +@dataclass(kw_only=True, frozen=True) class InternalServiceAdmin(APIUser): """Used to gain complete admin access by internal code components when performing tasks not started by users.""" id: ServiceAdminId = ServiceAdminId.migrations - is_admin: bool = field(default=True, init=False) - access_token: Optional[str] = field(repr=False, default=None, init=False) - full_name: Optional[str] = field(default=None, init=False) - first_name: Optional[str] = field(default=None, init=False) - last_name: Optional[str] = field(default=None, init=False) - email: Optional[str] = field(default=None, init=False) - is_authenticated: bool = field(default=True, init=False) + access_token: str = field(repr=False, default="internal-service-admin", init=False) + full_name: str | None = field(default=None, init=False) + first_name: str | None = field(default=None, init=False) + last_name: str | None = field(default=None, init=False) + email: str | None = field(default=None, init=False) + is_admin: bool = field(init=False, default=True) + + @property + def is_authenticated(self) -> bool: + """Internal admin users are always authenticated.""" + return True class GitlabAccessLevel(Enum): @@ -164,3 +194,16 @@ def __true_div__(self, other: "Slug") -> str: message=f"A path can be constructed only from 2 slugs, but the 'divisor' is of type {type(other)}" ) return self.value + "/" + other.value + + +AnyAPIUser = TypeVar("AnyAPIUser", bound=APIUser, covariant=True) + + +class Authenticator(Protocol[AnyAPIUser]): + """Interface for authenticating users.""" + + token_field: str + + async def authenticate(self, access_token: str, request: Request) -> AnyAPIUser: + """Validates the user credentials (i.e. we can say that the user is a valid Renku user).""" + ... diff --git a/components/renku_data_services/crc/db.py b/components/renku_data_services/crc/db.py index a3055a03b..a8e95d795 100644 --- a/components/renku_data_services/crc/db.py +++ b/components/renku_data_services/crc/db.py @@ -7,16 +7,15 @@ """ from asyncio import gather -from collections.abc import Awaitable, Callable, Collection, Sequence +from collections.abc import Callable, Collection, Coroutine, Sequence from dataclasses import dataclass, field from functools import wraps from typing import Any, Concatenate, Optional, ParamSpec, TypeVar, cast -from sqlalchemy import NullPool, create_engine, delete, select +from sqlalchemy import NullPool, create_engine, delete, false, select, true from sqlalchemy.ext.asyncio import AsyncSession from sqlalchemy.orm import Session, selectinload, sessionmaker from sqlalchemy.sql import Select, and_, not_, or_ -from sqlalchemy.sql.expression import false, true import renku_data_services.base_models as base_models from renku_data_services import errors @@ -116,7 +115,9 @@ def _classes_user_access_control( _T = TypeVar("_T") -def _only_admins(f: Callable[Concatenate[Any, _P], Awaitable[_T]]) -> Callable[Concatenate[Any, _P], Awaitable[_T]]: +def _only_admins( + f: Callable[Concatenate[Any, _P], Coroutine[Any, Any, _T]], +) -> Callable[Concatenate[Any, _P], Coroutine[Any, Any, _T]]: """Decorator that errors out if the user is not an admin. It expects the APIUser model to be a named parameter in the decorated function or @@ -183,6 +184,21 @@ async def get_resource_pools( output.append(rp.dump(quota)) return output + async def get_default_resource_class(self) -> models.ResourceClass: + """Get the default resource class in the default resource pool.""" + async with self.session_maker() as session: + stmt = ( + select(schemas.ResourceClassORM) + .where(schemas.ResourceClassORM.default == true()) + .where(schemas.ResourceClassORM.resource_pool.has(schemas.ResourcePoolORM.default == true())) + ) + res = await session.scalar(stmt) + if res is None: + raise errors.ProgrammingError( + message="Could not find the default class from the default resource pool, but this has to exist." + ) + return res.dump() + async def filter_resource_pools( self, api_user: base_models.APIUser, diff --git a/components/renku_data_services/notebooks/api.spec.yaml b/components/renku_data_services/notebooks/api.spec.yaml index 4b4b97def..e00952245 100644 --- a/components/renku_data_services/notebooks/api.spec.yaml +++ b/components/renku_data_services/notebooks/api.spec.yaml @@ -35,7 +35,7 @@ paths: name: server_name required: true schema: - type: string + "$ref": "#/components/schemas/ServerName" - description: 'The maximum number of (most recent) lines to return from the logs.' in: query name: max_lines @@ -152,7 +152,7 @@ paths: name: server_name required: true schema: - type: string + "$ref": "#/components/schemas/ServerName" - description: | If true, delete immediately disregarding the grace period of the underlying JupyterServer resource. @@ -187,7 +187,7 @@ paths: name: server_name required: true schema: - type: string + "$ref": "#/components/schemas/ServerName" responses: '200': content: @@ -211,7 +211,7 @@ paths: name: server_name required: true schema: - type: string + "$ref": "#/components/schemas/ServerName" requestBody: content: application/json: @@ -439,27 +439,24 @@ components: - registered type: object ErrorResponse: - type: object properties: error: - type: object - properties: - code: - type: integer - minimum: 0 - exclusiveMinimum: true - example: 1404 - detail: - type: string - example: "A more detailed optional message showing what the problem was" - message: - type: string - example: "Something went wrong - please try again later" - required: - - "code" - - "message" + "$ref": "#/components/schemas/ErrorResponseNested" + required: + - error + type: object + ErrorResponseNested: + properties: + code: + type: integer + detail: + type: string + message: + type: string required: - - "error" + - code + - message + type: object Generated: properties: enabled: @@ -505,7 +502,6 @@ components: required: - project_id - launcher_id - - image type: object LaunchNotebookRequestRepository: properties: @@ -607,7 +603,7 @@ components: image: type: string name: - type: string + "$ref": "#/components/schemas/ServerName" resources: "$ref": "#/components/schemas/UserPodResources" started: @@ -802,8 +798,6 @@ components: "$ref": "#/components/schemas/ResourceRequests" usage: "$ref": "#/components/schemas/ResourceUsage" - required: - - requests type: object UserSecrets: properties: @@ -898,7 +892,7 @@ components: image: type: string name: - type: string + "$ref": "#/components/schemas/ServerName" resources: "$ref": "#/components/schemas/SessionResources" started: @@ -983,17 +977,12 @@ components: gpu: type: integer description: Number of GPUs used - default: 0 memory: type: integer description: Ammount of RAM for the session, in gigabytes storage: type: integer description: The size of disk storage for the session, in gigabytes - required: - - cpu - - memory - - storage example: cpu: 1.5 memory: 1 @@ -1038,6 +1027,14 @@ components: - configuration - source_path - target_path + ServerName: + type: string + minLength: 5 + # The actual limit from k8s is 63 characters but we leave some leeway in case we need to make changes + # Note that changing this should be compatible with the methods that generate server names in the code + maxLength: 50 + pattern: '^[a-z]([-a-z0-9]*[a-z0-9])?$' + example: d185e68d-d43-renku-2-b9ac279a4e8a85ac28d08 responses: Error: description: The schema for all 4xx and 5xx responses diff --git a/components/renku_data_services/notebooks/api/amalthea_patches/cloudstorage.py b/components/renku_data_services/notebooks/api/amalthea_patches/cloudstorage.py index e18d4e7c1..861f05ec6 100644 --- a/components/renku_data_services/notebooks/api/amalthea_patches/cloudstorage.py +++ b/components/renku_data_services/notebooks/api/amalthea_patches/cloudstorage.py @@ -3,23 +3,25 @@ from typing import TYPE_CHECKING, Any if TYPE_CHECKING: + # NOTE: If these are directly imported then you get circular imports. from renku_data_services.notebooks.api.classes.cloud_storage import ICloudStorageRequest from renku_data_services.notebooks.api.classes.server import UserServer -def main(server: "UserServer") -> list[dict[str, Any]]: +async def main(server: "UserServer") -> list[dict[str, Any]]: """Cloud storage patches.""" cloud_storage_patches: list[dict[str, Any]] = [] cloud_storage_request: ICloudStorageRequest if not server.cloudstorage: return [] + repositories = await server.repositories() for i, cloud_storage_request in enumerate(server.cloudstorage): cloud_storage_patches.extend( cloud_storage_request.get_manifest_patch( f"{server.server_name}-ds-{i}", server.k8s_client.preferred_namespace ) ) - if server.repositories: + if repositories: cloud_storage_patches.append( { "type": "application/json-patch+json", diff --git a/components/renku_data_services/notebooks/api/amalthea_patches/general.py b/components/renku_data_services/notebooks/api/amalthea_patches/general.py index 897858cac..b613b1eba 100644 --- a/components/renku_data_services/notebooks/api/amalthea_patches/general.py +++ b/components/renku_data_services/notebooks/api/amalthea_patches/general.py @@ -2,9 +2,8 @@ from typing import TYPE_CHECKING, Any -from renku_data_services.notebooks.api.classes.user import RegisteredUser - if TYPE_CHECKING: + # NOTE: If these are directly imported then you get circular imports. from renku_data_services.notebooks.api.classes.server import UserServer @@ -159,7 +158,7 @@ def test(server: "UserServer") -> list[dict[str, Any]]: # does not use all containers. container_names = ( server.config.sessions.containers.registered[:2] - if isinstance(server.user, RegisteredUser) + if server.user.is_authenticated else server.config.sessions.containers.anonymous[:1] ) for container_ind, container_name in enumerate(container_names): @@ -181,7 +180,7 @@ def test(server: "UserServer") -> list[dict[str, Any]]: def oidc_unverified_email(server: "UserServer") -> list[dict[str, Any]]: """Allow users whose email is unverified in Keycloak to still be able to access their sessions.""" patches = [] - if isinstance(server.user, RegisteredUser): + if server.user.is_authenticated: # modify oauth2 proxy to accept users whose email has not been verified # usually enabled for dev purposes patches.append( diff --git a/components/renku_data_services/notebooks/api/amalthea_patches/git_proxy.py b/components/renku_data_services/notebooks/api/amalthea_patches/git_proxy.py index a12f3c628..3773c57ea 100644 --- a/components/renku_data_services/notebooks/api/amalthea_patches/git_proxy.py +++ b/components/renku_data_services/notebooks/api/amalthea_patches/git_proxy.py @@ -4,18 +4,20 @@ from dataclasses import asdict from typing import TYPE_CHECKING, Any +from kubernetes import client + from renku_data_services.notebooks.api.amalthea_patches.utils import get_certificates_volume_mounts -from renku_data_services.notebooks.api.classes.user import AnonymousUser if TYPE_CHECKING: + # NOTE: If these are directly imported then you get circular imports. from renku_data_services.notebooks.api.classes.server import UserServer -def main(server: "UserServer") -> list[dict[str, Any]]: +async def main_container(server: "UserServer") -> client.V1Container | None: """The patch that adds the git proxy container to a session statefulset.""" - user_is_anonymous = isinstance(server.user, AnonymousUser) - if user_is_anonymous or not server.repositories: - return [] + repositories = await server.repositories() + if not server.user.is_authenticated or not repositories: + return None etc_cert_volume_mount = get_certificates_volume_mounts( server.config, @@ -23,39 +25,84 @@ def main(server: "UserServer") -> list[dict[str, Any]]: etc_certs=True, read_only_etc_certs=True, ) - patches = [] prefix = "GIT_PROXY_" + git_providers = await server.git_providers() + repositories = await server.repositories() env = [ - {"name": f"{prefix}PORT", "value": str(server.config.sessions.git_proxy.port)}, - {"name": f"{prefix}HEALTH_PORT", "value": str(server.config.sessions.git_proxy.health_port)}, - { - "name": f"{prefix}ANONYMOUS_SESSION", - "value": "true" if user_is_anonymous else "false", - }, - {"name": f"{prefix}RENKU_ACCESS_TOKEN", "value": str(server.user.access_token)}, - {"name": f"{prefix}RENKU_REFRESH_TOKEN", "value": str(server.user.refresh_token)}, - {"name": f"{prefix}RENKU_REALM", "value": server.config.keycloak_realm}, - { - "name": f"{prefix}RENKU_CLIENT_ID", - "value": str(server.config.sessions.git_proxy.renku_client_id), + client.V1EnvVar(name=f"{prefix}PORT", value=str(server.config.sessions.git_proxy.port)), + client.V1EnvVar(name=f"{prefix}HEALTH_PORT", value=str(server.config.sessions.git_proxy.health_port)), + client.V1EnvVar( + name=f"{prefix}ANONYMOUS_SESSION", + value="false" if server.user.is_authenticated else "true", + ), + client.V1EnvVar(name=f"{prefix}RENKU_ACCESS_TOKEN", value=str(server.user.access_token)), + client.V1EnvVar(name=f"{prefix}RENKU_REFRESH_TOKEN", value=str(server.user.refresh_token)), + client.V1EnvVar(name=f"{prefix}RENKU_REALM", value=server.config.keycloak_realm), + client.V1EnvVar( + name=f"{prefix}RENKU_CLIENT_ID", + value=str(server.config.sessions.git_proxy.renku_client_id), + ), + client.V1EnvVar( + name=f"{prefix}RENKU_CLIENT_SECRET", + value=str(server.config.sessions.git_proxy.renku_client_secret), + ), + client.V1EnvVar(name=f"{prefix}RENKU_URL", value="https://" + server.config.sessions.ingress.host), + client.V1EnvVar( + name=f"{prefix}REPOSITORIES", + value=json.dumps([asdict(repo) for repo in repositories]), + ), + client.V1EnvVar( + name=f"{prefix}PROVIDERS", + value=json.dumps( + [dict(id=provider.id, access_token_url=provider.access_token_url) for provider in git_providers] + ), + ), + ] + container = client.V1Container( + image=server.config.sessions.git_proxy.image, + security_context={ + "fsGroup": 100, + "runAsGroup": 1000, + "runAsUser": 1000, + "allowPrivilegeEscalation": False, + "runAsNonRoot": True, }, - { - "name": f"{prefix}RENKU_CLIENT_SECRET", - "value": str(server.config.sessions.git_proxy.renku_client_secret), + name="git-proxy", + env=env, + liveness_probe={ + "httpGet": { + "path": "/health", + "port": server.config.sessions.git_proxy.health_port, + }, + "initialDelaySeconds": 3, }, - {"name": f"{prefix}RENKU_URL", "value": "https://" + server.config.sessions.ingress.host}, - { - "name": f"{prefix}REPOSITORIES", - "value": json.dumps([asdict(repo) for repo in server.repositories]), + readiness_probe={ + "httpGet": { + "path": "/health", + "port": server.config.sessions.git_proxy.health_port, + }, + "initialDelaySeconds": 3, }, - { - "name": f"{prefix}PROVIDERS", - "value": json.dumps( - [dict(id=provider.id, access_token_url=provider.access_token_url) for provider in server.git_providers] - ), + volume_mounts=etc_cert_volume_mount, + resources={ + "requests": {"memory": "16Mi", "cpu": "50m"}, }, - ] + ) + return container + + +async def main(server: "UserServer") -> list[dict[str, Any]]: + """The patch that adds the git proxy container to a session statefulset.""" + repositories = await server.repositories() + if not server.user.is_authenticated or not repositories: + return [] + + container = await main_container(server) + if not container: + return [] + + patches = [] patches.append( { @@ -64,37 +111,8 @@ def main(server: "UserServer") -> list[dict[str, Any]]: { "op": "add", "path": "/statefulset/spec/template/spec/containers/-", - "value": { - "image": server.config.sessions.git_proxy.image, - "securityContext": { - "fsGroup": 100, - "runAsGroup": 1000, - "runAsUser": 1000, - "allowPrivilegeEscalation": False, - "runAsNonRoot": True, - }, - "name": "git-proxy", - "env": env, - "livenessProbe": { - "httpGet": { - "path": "/health", - "port": server.config.sessions.git_proxy.health_port, - }, - "initialDelaySeconds": 3, - }, - "readinessProbe": { - "httpGet": { - "path": "/health", - "port": server.config.sessions.git_proxy.health_port, - }, - "initialDelaySeconds": 3, - }, - "volumeMounts": etc_cert_volume_mount, - "resources": { - "requests": {"memory": "16Mi", "cpu": "50m"}, - }, - }, - } + "value": client.ApiClient().sanitize_for_serialization(container), + }, ], } ) diff --git a/components/renku_data_services/notebooks/api/amalthea_patches/git_sidecar.py b/components/renku_data_services/notebooks/api/amalthea_patches/git_sidecar.py index 05d067ae5..ceac8e248 100644 --- a/components/renku_data_services/notebooks/api/amalthea_patches/git_sidecar.py +++ b/components/renku_data_services/notebooks/api/amalthea_patches/git_sidecar.py @@ -3,18 +3,18 @@ import os from typing import TYPE_CHECKING, Any -from renku_data_services.notebooks.api.classes.user import RegisteredUser - if TYPE_CHECKING: + # NOTE: If these are directly imported then you get circular imports. from renku_data_services.notebooks.api.classes.server import UserServer -def main(server: "UserServer") -> list[dict[str, Any]]: +async def main(server: "UserServer") -> list[dict[str, Any]]: """Adds the git sidecar container to the session statefulset.""" # NOTE: Sessions can be persisted only for registered users - if not isinstance(server.user, RegisteredUser): + if not server.user.is_authenticated: return [] - if not server.repositories: + repositories = await server.repositories() + if not repositories: return [] gitlab_project = getattr(server, "gitlab_project", None) @@ -22,7 +22,7 @@ def main(server: "UserServer") -> list[dict[str, Any]]: commit_sha = getattr(server, "commit_sha", None) volume_mount = { - "mountPath": server.work_dir.absolute().as_posix(), + "mountPath": server.work_dir.as_posix(), "name": "workspace", } if gl_project_path: @@ -51,7 +51,7 @@ def main(server: "UserServer") -> list[dict[str, Any]]: "env": [ { "name": "GIT_RPC_MOUNT_PATH", - "value": server.work_dir.absolute().as_posix(), + "value": server.work_dir.as_posix(), }, { "name": "GIT_RPC_PORT", @@ -91,7 +91,7 @@ def main(server: "UserServer") -> list[dict[str, Any]]: }, { "name": "RENKU_USERNAME", - "value": f"{server.user.username}", + "value": f"{server.user.id}", }, { "name": "GIT_RPC_GIT_PROXY_HEALTH_PORT", diff --git a/components/renku_data_services/notebooks/api/amalthea_patches/init_containers.py b/components/renku_data_services/notebooks/api/amalthea_patches/init_containers.py index a28823517..ccccd0907 100644 --- a/components/renku_data_services/notebooks/api/amalthea_patches/init_containers.py +++ b/components/renku_data_services/notebooks/api/amalthea_patches/init_containers.py @@ -6,21 +6,22 @@ from pathlib import Path from typing import TYPE_CHECKING, Any -from gitlab.v4.objects.users import CurrentUser from kubernetes import client from renku_data_services.notebooks.api.amalthea_patches.utils import get_certificates_volume_mounts -from renku_data_services.notebooks.api.classes.user import AnonymousUser, RegisteredUser from renku_data_services.notebooks.config import _NotebooksConfig if TYPE_CHECKING: + # NOTE: If these are directly imported then you get circular imports. from renku_data_services.notebooks.api.classes.server import UserServer -def git_clone(server: "UserServer") -> list[dict[str, Any]]: - """Adds the patch for the init container that clones the git repository.""" - if not server.repositories: - return [] +async def git_clone_container_v2(server: "UserServer") -> dict[str, Any] | None: + """Returns the specification for the container that clones the user's repositories for new operator.""" + amalthea_session_work_volume: str = "amalthea-volume" + repositories = await server.repositories() + if not repositories: + return None etc_cert_volume_mount = get_certificates_volume_mounts( server.config, @@ -29,16 +30,16 @@ def git_clone(server: "UserServer") -> list[dict[str, Any]]: read_only_etc_certs=True, ) - user_is_anonymous = isinstance(server.user, AnonymousUser) + user_is_anonymous = not server.user.is_authenticated prefix = "GIT_CLONE_" env = [ { "name": f"{prefix}WORKSPACE_MOUNT_PATH", - "value": server.workspace_mount_path.absolute().as_posix(), + "value": server.workspace_mount_path.as_posix(), }, { "name": f"{prefix}MOUNT_PATH", - "value": server.work_dir.absolute().as_posix(), + "value": server.work_dir.as_posix(), }, { "name": f"{prefix}LFS_AUTO_FETCH", @@ -46,7 +47,7 @@ def git_clone(server: "UserServer") -> list[dict[str, Any]]: }, { "name": f"{prefix}USER__USERNAME", - "value": server.user.username, + "value": server.user.email, }, { "name": f"{prefix}USER__RENKU_TOKEN", @@ -79,21 +80,148 @@ def git_clone(server: "UserServer") -> list[dict[str, Any]]: "value": str(Path(etc_cert_volume_mount[0]["mountPath"]) / "ca-certificates.crt"), }, ] - if ( - isinstance(server.user, RegisteredUser) - and isinstance(server.user.gitlab_user, CurrentUser) - and not user_is_anonymous - ): - env += [ - {"name": f"{prefix}USER__EMAIL", "value": server.user.gitlab_user.email}, + if server.user.is_authenticated: + if server.user.email: + env.append( + {"name": f"{prefix}USER__EMAIL", "value": server.user.email}, + ) + full_name = server.user.get_full_name() + if full_name: + env.append( + { + "name": f"{prefix}USER__FULL_NAME", + "value": full_name, + }, + ) + + # Set up git repositories + for idx, repo in enumerate(repositories): + obj_env = f"{prefix}REPOSITORIES_{idx}_" + env.append( + { + "name": obj_env, + "value": json.dumps(asdict(repo)), + } + ) + + # Set up git providers + required_git_providers = await server.required_git_providers() + for idx, provider in enumerate(required_git_providers): + obj_env = f"{prefix}GIT_PROVIDERS_{idx}_" + data = dict(id=provider.id, access_token_url=provider.access_token_url) + env.append( + { + "name": obj_env, + "value": json.dumps(data), + } + ) + + return { + "image": server.config.sessions.git_clone.image, + "name": "git-clone", + "resources": { + "requests": { + "cpu": "100m", + "memory": "100Mi", + } + }, + "securityContext": { + "allowPrivilegeEscalation": False, + "fsGroup": 100, + "runAsGroup": 100, + "runAsUser": 1000, + "runAsNonRoot": True, + }, + "volumeMounts": [ { - "name": f"{prefix}USER__FULL_NAME", - "value": server.user.gitlab_user.name, + "mountPath": server.workspace_mount_path.as_posix(), + "name": amalthea_session_work_volume, }, - ] + *etc_cert_volume_mount, + ], + "env": env, + } + + +async def git_clone_container(server: "UserServer") -> dict[str, Any] | None: + """Returns the specification for the container that clones the user's repositories.""" + repositories = await server.repositories() + if not repositories: + return None + + etc_cert_volume_mount = get_certificates_volume_mounts( + server.config, + custom_certs=False, + etc_certs=True, + read_only_etc_certs=True, + ) + + user_is_anonymous = not server.user.is_authenticated + prefix = "GIT_CLONE_" + env = [ + { + "name": f"{prefix}WORKSPACE_MOUNT_PATH", + "value": server.workspace_mount_path.as_posix(), + }, + { + "name": f"{prefix}MOUNT_PATH", + "value": server.work_dir.as_posix(), + }, + { + "name": f"{prefix}LFS_AUTO_FETCH", + "value": "1" if server.server_options.lfs_auto_fetch else "0", + }, + { + "name": f"{prefix}USER__USERNAME", + "value": server.user.email, + }, + { + "name": f"{prefix}USER__RENKU_TOKEN", + "value": str(server.user.access_token), + }, + {"name": f"{prefix}IS_GIT_PROXY_ENABLED", "value": "0" if user_is_anonymous else "1"}, + { + "name": f"{prefix}SENTRY__ENABLED", + "value": str(server.config.sessions.git_clone.sentry.enabled).lower(), + }, + { + "name": f"{prefix}SENTRY__DSN", + "value": server.config.sessions.git_clone.sentry.dsn, + }, + { + "name": f"{prefix}SENTRY__ENVIRONMENT", + "value": server.config.sessions.git_clone.sentry.env, + }, + { + "name": f"{prefix}SENTRY__SAMPLE_RATE", + "value": str(server.config.sessions.git_clone.sentry.sample_rate), + }, + {"name": "SENTRY_RELEASE", "value": os.environ.get("SENTRY_RELEASE")}, + { + "name": "REQUESTS_CA_BUNDLE", + "value": str(Path(etc_cert_volume_mount[0]["mountPath"]) / "ca-certificates.crt"), + }, + { + "name": "SSL_CERT_FILE", + "value": str(Path(etc_cert_volume_mount[0]["mountPath"]) / "ca-certificates.crt"), + }, + ] + if server.user.is_authenticated: + if server.user.email: + env.append( + {"name": f"{prefix}USER__EMAIL", "value": server.user.email}, + ) + full_name = server.user.get_full_name() + if full_name: + env.append( + { + "name": f"{prefix}USER__FULL_NAME", + "value": full_name, + }, + ) # Set up git repositories - for idx, repo in enumerate(server.repositories): + for idx, repo in enumerate(repositories): obj_env = f"{prefix}REPOSITORIES_{idx}_" env.append( { @@ -103,7 +231,8 @@ def git_clone(server: "UserServer") -> list[dict[str, Any]]: ) # Set up git providers - for idx, provider in enumerate(server.required_git_providers): + required_git_providers = await server.required_git_providers() + for idx, provider in enumerate(required_git_providers): obj_env = f"{prefix}GIT_PROVIDERS_{idx}_" data = dict(id=provider.id, access_token_url=provider.access_token_url) env.append( @@ -113,6 +242,38 @@ def git_clone(server: "UserServer") -> list[dict[str, Any]]: } ) + return { + "image": server.config.sessions.git_clone.image, + "name": "git-clone", + "resources": { + "requests": { + "cpu": "100m", + "memory": "100Mi", + } + }, + "securityContext": { + "allowPrivilegeEscalation": False, + "fsGroup": 100, + "runAsGroup": 100, + "runAsUser": 1000, + "runAsNonRoot": True, + }, + "volumeMounts": [ + { + "mountPath": server.workspace_mount_path.as_posix(), + "name": "workspace", + }, + *etc_cert_volume_mount, + ], + "env": env, + } + + +async def git_clone(server: "UserServer") -> list[dict[str, Any]]: + """The patch for the init container that clones the git repository.""" + container = await git_clone_container(server) + if not container: + return [] return [ { "type": "application/json-patch+json", @@ -120,39 +281,15 @@ def git_clone(server: "UserServer") -> list[dict[str, Any]]: { "op": "add", "path": "/statefulset/spec/template/spec/initContainers/-", - "value": { - "image": server.config.sessions.git_clone.image, - "name": "git-clone", - "resources": { - "requests": { - "cpu": "100m", - "memory": "100Mi", - } - }, - "securityContext": { - "allowPrivilegeEscalation": False, - "fsGroup": 100, - "runAsGroup": 100, - "runAsUser": 1000, - "runAsNonRoot": True, - }, - "volumeMounts": [ - { - "mountPath": server.workspace_mount_path.absolute().as_posix(), - "name": "workspace", - }, - *etc_cert_volume_mount, - ], - "env": env, - }, + "value": container, }, ], } ] -def certificates(config: _NotebooksConfig) -> list[dict[str, Any]]: - """Add a container that initializes custom certificate authorities for a session.""" +def certificates_container(config: _NotebooksConfig) -> tuple[client.V1Container, list[client.V1Volume]]: + """The specification for the container that setups self signed CAs.""" init_container = client.V1Container( name="init-certificates", image=config.sessions.ca_certs.image, @@ -181,6 +318,12 @@ def certificates(config: _NotebooksConfig) -> list[dict[str, Any]]: ], ), ) + return (init_container, [volume_etc_certs, volume_custom_certs]) + + +def certificates(config: _NotebooksConfig) -> list[dict[str, Any]]: + """Add a container that initializes custom certificate authorities for a session.""" + container, vols = certificates_container(config) api_client = client.ApiClient() patches = [ { @@ -189,35 +332,28 @@ def certificates(config: _NotebooksConfig) -> list[dict[str, Any]]: { "op": "add", "path": "/statefulset/spec/template/spec/initContainers/-", - "value": api_client.sanitize_for_serialization(init_container), - }, - ], - }, - { - "type": "application/json-patch+json", - "patch": [ - { - "op": "add", - "path": "/statefulset/spec/template/spec/volumes/-", - "value": api_client.sanitize_for_serialization(volume_etc_certs), - }, - ], - }, - { - "type": "application/json-patch+json", - "patch": [ - { - "op": "add", - "path": "/statefulset/spec/template/spec/volumes/-", - "value": api_client.sanitize_for_serialization(volume_custom_certs), + "value": api_client.sanitize_for_serialization(container), }, ], }, ] + for vol in vols: + patches.append( + { + "type": "application/json-patch+json", + "patch": [ + { + "op": "add", + "path": "/statefulset/spec/template/spec/volumes/-", + "value": api_client.sanitize_for_serialization(vol), + }, + ], + }, + ) return patches -def download_image(server: "UserServer") -> list[dict[str, Any]]: +def download_image_container(server: "UserServer") -> client.V1Container: """Adds a container that does not do anything but simply downloads the session image at startup.""" container = client.V1Container( name="download-image", @@ -231,6 +367,12 @@ def download_image(server: "UserServer") -> list[dict[str, Any]]: } }, ) + return container + + +def download_image(server: "UserServer") -> list[dict[str, Any]]: + """Adds a container that does not do anything but simply downloads the session image at startup.""" + container = download_image_container(server) api_client = client.ApiClient() return [ { diff --git a/components/renku_data_services/notebooks/api/amalthea_patches/inject_certificates.py b/components/renku_data_services/notebooks/api/amalthea_patches/inject_certificates.py index c120b6abb..e46707dc8 100644 --- a/components/renku_data_services/notebooks/api/amalthea_patches/inject_certificates.py +++ b/components/renku_data_services/notebooks/api/amalthea_patches/inject_certificates.py @@ -4,9 +4,9 @@ from typing import TYPE_CHECKING, Any from renku_data_services.notebooks.api.amalthea_patches.utils import get_certificates_volume_mounts -from renku_data_services.notebooks.api.classes.user import RegisteredUser if TYPE_CHECKING: + # NOTE: If these are directly imported then you get circular imports. from renku_data_services.notebooks.api.classes.server import UserServer @@ -31,7 +31,7 @@ def proxy(server: "UserServer") -> list[dict[str, Any]]: ], }, ] - if isinstance(server.user, RegisteredUser): + if server.user.is_authenticated: patches.append( { "type": "application/json-patch+json", diff --git a/components/renku_data_services/notebooks/api/amalthea_patches/jupyter_server.py b/components/renku_data_services/notebooks/api/amalthea_patches/jupyter_server.py index 6ad0b3cb0..6f7affc09 100644 --- a/components/renku_data_services/notebooks/api/amalthea_patches/jupyter_server.py +++ b/components/renku_data_services/notebooks/api/amalthea_patches/jupyter_server.py @@ -5,13 +5,13 @@ from pathlib import Path from typing import TYPE_CHECKING, Any -from gitlab.v4.objects.users import CurrentUser from kubernetes import client -from renku_data_services.notebooks.api.classes.user import RegisteredUser +from renku_data_services.base_models.core import AuthenticatedAPIUser from renku_data_services.notebooks.errors.user import OverriddenEnvironmentVariableError if TYPE_CHECKING: + # NOTE: If these are directly imported then you get circular imports. from renku_data_services.notebooks.api.classes.server import UserServer @@ -30,7 +30,7 @@ def env(server: "UserServer") -> list[dict[str, Any]]: "path": "/statefulset/spec/template/spec/containers/0/env/-", "value": { "name": "RENKU_USERNAME", - "value": server.user.username, + "value": server.user.id, }, }, { @@ -43,7 +43,7 @@ def env(server: "UserServer") -> list[dict[str, Any]]: "path": "/statefulset/spec/template/spec/containers/0/env/-", "value": { "name": "NOTEBOOK_DIR", - "value": server.work_dir.absolute().as_posix(), + "value": server.work_dir.as_posix(), }, }, { @@ -53,7 +53,7 @@ def env(server: "UserServer") -> list[dict[str, Any]]: # relative to $HOME. "value": { "name": "MOUNT_PATH", - "value": server.work_dir.absolute().as_posix(), + "value": server.work_dir.as_posix(), }, }, { @@ -109,21 +109,17 @@ def args() -> list[dict[str, Any]]: return patches -def image_pull_secret(server: "UserServer") -> list[dict[str, Any]]: +def image_pull_secret(server: "UserServer", access_token: str | None) -> list[dict[str, Any]]: """Adds an image pull secret to the session if the session image is not public.""" patches = [] - if ( - isinstance(server.user, RegisteredUser) - and isinstance(server.user.gitlab_user, CurrentUser) - and server.is_image_private - ): + if isinstance(server.user, AuthenticatedAPIUser) and server.is_image_private and access_token: image_pull_secret_name = server.server_name + "-image-secret" registry_secret = { "auths": { server.config.git.registry: { "Username": "oauth2", - "Password": server.user.git_token, - "Email": server.user.gitlab_user.email, + "Password": access_token, + "Email": server.user.email, } } } @@ -227,7 +223,7 @@ def rstudio_env_variables(server: "UserServer") -> list[dict[str, Any]]: "path": "/statefulset/spec/template/spec/containers/0/volumeMounts/-", "value": { "name": secret_name, - "mountPath": mount_location.absolute().as_posix(), + "mountPath": mount_location.as_posix(), "subPath": mount_location.name, "readOnly": True, }, diff --git a/components/renku_data_services/notebooks/api/classes/cloud_storage/existing.py b/components/renku_data_services/notebooks/api/classes/cloud_storage/existing.py index 07da3ab5c..b3abeac84 100644 --- a/components/renku_data_services/notebooks/api/classes/cloud_storage/existing.py +++ b/components/renku_data_services/notebooks/api/classes/cloud_storage/existing.py @@ -1,7 +1,10 @@ """Cloud storage.""" from dataclasses import dataclass -from typing import Any, Self +from typing import Any, Self, cast + +from renku_data_services.errors import errors +from renku_data_services.notebooks.crs import JupyterServerV1Alpha1 @dataclass @@ -12,11 +15,13 @@ class ExistingCloudStorage: type: str @classmethod - def from_manifest(cls, manifest: dict[str, Any], storage_class: str = "csi-rclone") -> list[Self]: + def from_manifest(cls, manifest: JupyterServerV1Alpha1, storage_class: str = "csi-rclone") -> list[Self]: """The patches applied to a jupyter server to insert the storage in the session.""" + if manifest.spec is None: + raise errors.ProgrammingError(message="Unexpected manifest format") output: list[Self] = [] - for patch_collection in manifest["spec"]["patches"]: - for patch in patch_collection["patch"]: + for patch_collection in manifest.spec.patches: + for patch in cast(list[dict[str, Any]], patch_collection.patch): if patch["op"] == "test": continue if not isinstance(patch["value"], dict): diff --git a/components/renku_data_services/notebooks/api/classes/data_service.py b/components/renku_data_services/notebooks/api/classes/data_service.py index e2f1c7973..4e7b6d44c 100644 --- a/components/renku_data_services/notebooks/api/classes/data_service.py +++ b/components/renku_data_services/notebooks/api/classes/data_service.py @@ -4,11 +4,20 @@ from typing import Any, NamedTuple, Optional, cast from urllib.parse import urljoin, urlparse -import requests +import httpx from sanic.log import logger +from renku_data_services.base_models import APIUser +from renku_data_services.crc.db import ResourcePoolRepository +from renku_data_services.crc.models import ResourceClass, ResourcePool +from renku_data_services.notebooks.api.classes.repository import ( + INTERNAL_GITLAB_PROVIDER, + GitProvider, + OAuth2Connection, + OAuth2Provider, +) +from renku_data_services.notebooks.api.schemas.server_options import ServerOptions from renku_data_services.notebooks.errors.intermittent import IntermittentError -from renku_data_services.notebooks.errors.programming import ConfigurationError from renku_data_services.notebooks.errors.user import ( AuthenticationError, InvalidCloudStorageConfiguration, @@ -16,10 +25,6 @@ MissingResourceError, ) -from ..schemas.server_options import ServerOptions -from .repository import INTERNAL_GITLAB_PROVIDER, GitProvider, OAuth2Connection, OAuth2Provider -from .user import User - class CloudStorageConfig(NamedTuple): """Cloud storage configuration.""" @@ -40,18 +45,21 @@ class StorageValidator: def __post_init__(self) -> None: self.storage_url = self.storage_url.rstrip("/") - def get_storage_by_id(self, user: User, project_id: int, storage_id: str) -> CloudStorageConfig: + async def get_storage_by_id( + self, user: APIUser, internal_gitlab_user: APIUser, project_id: int, storage_id: str + ) -> CloudStorageConfig: """Get a specific cloud storage configuration by ID.""" headers = None - if user is not None and user.access_token is not None and user.git_token is not None: + if user is not None and user.access_token is not None and internal_gitlab_user.access_token is not None: headers = { "Authorization": f"bearer {user.access_token}", - "Gitlab-Access-Token": user.git_token, + "Gitlab-Access-Token": user.access_token, } # TODO: remove project_id once authz on the data service works properly request_url = self.storage_url + f"/storage/{storage_id}?project_id={project_id}" logger.info(f"getting storage info by id: {request_url}") - res = requests.get(request_url, headers=headers, timeout=10) + async with httpx.AsyncClient() as client: + res = await client.get(request_url, headers=headers, timeout=10) if res.status_code == 404: raise MissingResourceError(message=f"Couldn't find cloud storage with id {storage_id}") if res.status_code == 401: @@ -69,9 +77,10 @@ def get_storage_by_id(self, user: User, project_id: int, storage_id: str) -> Clo name=storage["name"], ) - def validate_storage_configuration(self, configuration: dict[str, Any], source_path: str) -> None: + async def validate_storage_configuration(self, configuration: dict[str, Any], source_path: str) -> None: """Validate the cloud storage configuration.""" - res = requests.post(self.storage_url + "/storage_schema/validate", json=configuration, timeout=10) + async with httpx.AsyncClient() as client: + res = await client.post(self.storage_url + "/storage_schema/validate", json=configuration, timeout=10) if res.status_code == 422: raise InvalidCloudStorageConfiguration( message=f"The provided cloud storage configuration isn't valid: {res.json()}", @@ -81,9 +90,10 @@ def validate_storage_configuration(self, configuration: dict[str, Any], source_p message="The data service sent an unexpected response, please try again later", ) - def obscure_password_fields_for_storage(self, configuration: dict[str, Any]) -> dict[str, Any]: + async def obscure_password_fields_for_storage(self, configuration: dict[str, Any]) -> dict[str, Any]: """Obscures password fields for use with rclone.""" - res = requests.post(self.storage_url + "/storage_schema/obscure", json=configuration, timeout=10) + async with httpx.AsyncClient() as client: + res = await client.post(self.storage_url + "/storage_schema/obscure", json=configuration, timeout=10) if res.status_code != 200: raise InvalidCloudStorageConfiguration( @@ -97,15 +107,17 @@ def obscure_password_fields_for_storage(self, configuration: dict[str, Any]) -> class DummyStorageValidator: """Dummy cloud storage validator used for testing.""" - def get_storage_by_id(self, user: User, project_id: int, storage_id: str) -> CloudStorageConfig: + async def get_storage_by_id( + self, user: APIUser, internal_gitlab_user: APIUser, project_id: int, storage_id: str + ) -> CloudStorageConfig: """Get storage by ID.""" raise NotImplementedError() - def validate_storage_configuration(self, configuration: dict[str, Any], source_path: str) -> None: + async def validate_storage_configuration(self, configuration: dict[str, Any], source_path: str) -> None: """Validate the cloud storage configuration.""" raise NotImplementedError() - def obscure_password_fields_for_storage(self, configuration: dict[str, Any]) -> dict[str, Any]: + async def obscure_password_fields_for_storage(self, configuration: dict[str, Any]) -> dict[str, Any]: """Obscure the password fields in a cloud storage configuration.""" raise NotImplementedError() @@ -114,14 +126,11 @@ def obscure_password_fields_for_storage(self, configuration: dict[str, Any]) -> class CRCValidator: """Calls to the CRC service to validate resource requests.""" - crc_url: str - - def __post_init__(self) -> None: - self.crc_url = self.crc_url.rstrip("/") + rp_repo: ResourcePoolRepository - def validate_class_storage( + async def validate_class_storage( self, - user: User, + user: APIUser, class_id: int, storage: Optional[int] = None, ) -> ServerOptions: @@ -129,105 +138,83 @@ def validate_class_storage( Storage in memory are assumed to be in gigabytes. """ - resource_pools = self._get_resource_pools(user=user) - pool = None - res_class = None + resource_pools = await self.rp_repo.get_resource_pools(user) + pool: ResourcePool | None = None + res_class: ResourceClass | None = None for rp in resource_pools: - for cls in rp["classes"]: - if cls["id"] == class_id: + for cls in rp.classes: + if cls.id == class_id: res_class = cls pool = rp break if pool is None or res_class is None: raise InvalidComputeResourceError(message=f"The resource class ID {class_id} does not exist.") if storage is None: - storage = res_class.get("default_storage", 1) + storage = res_class.default_storage if storage < 1: raise InvalidComputeResourceError(message="Storage requests have to be greater than or equal to 1GB.") - if storage > res_class.get("max_storage"): + if storage > res_class.max_storage: raise InvalidComputeResourceError(message="The requested storage surpasses the maximum value allowed.") options = ServerOptions.from_resource_class(res_class) - options.idle_threshold_seconds = pool.get("idle_threshold") - options.hibernation_threshold_seconds = pool.get("hibernation_threshold") + options.idle_threshold_seconds = pool.idle_threshold + options.hibernation_threshold_seconds = pool.hibernation_threshold options.set_storage(storage, gigabytes=True) - quota = pool.get("quota") - if quota is not None and isinstance(quota, dict): - options.priority_class = quota.get("id") + quota = pool.quota + if quota is not None: + options.priority_class = quota.id return options - def get_default_class(self) -> dict[str, Any]: + async def get_default_class(self) -> ResourceClass: """Get the default resource class from the default resource pool.""" - pools = self._get_resource_pools() - default_pools = [p for p in pools if p.get("default", False)] - if len(default_pools) < 1: - raise ConfigurationError("Cannot find the default resource pool.") - default_pool = default_pools[0] - default_classes: list[dict[str, Any]] = [ - cls for cls in default_pool.get("classes", []) if cls.get("default", False) - ] - if len(default_classes) < 1: - raise ConfigurationError("Cannot find the default resource class.") - return default_classes[0] - - def find_acceptable_class(self, user: User, requested_server_options: ServerOptions) -> Optional[ServerOptions]: + return await self.rp_repo.get_default_resource_class() + + async def find_acceptable_class( + self, user: APIUser, requested_server_options: ServerOptions + ) -> Optional[ServerOptions]: """Find a resource class greater than or equal to the old-style server options being requested. Only classes available to the user are considered. """ - resource_pools = self._get_resource_pools(user=user, server_options=requested_server_options) + resource_pools = await self._get_resource_pools(user=user, server_options=requested_server_options) # Difference and best candidate in the case that the resource class will be # greater than or equal to the request best_larger_or_equal_diff: ServerOptions | None = None best_larger_or_equal_class: ServerOptions | None = None zero_diff = ServerOptions(cpu=0, memory=0, gpu=0, storage=0) for resource_pool in resource_pools: - quota = resource_pool.get("quota") - for resource_class in resource_pool["classes"]: + quota = resource_pool.quota + for resource_class in resource_pool.classes: resource_class_mdl = ServerOptions.from_resource_class(resource_class) - if quota is not None and isinstance(quota, dict): - resource_class_mdl.priority_class = quota.get("id") + if quota is not None: + resource_class_mdl.priority_class = quota.id diff = resource_class_mdl - requested_server_options if ( diff >= zero_diff and (best_larger_or_equal_diff is None or diff < best_larger_or_equal_diff) - and resource_class["matching"] + and resource_class.matching ): best_larger_or_equal_diff = diff best_larger_or_equal_class = resource_class_mdl return best_larger_or_equal_class - def _get_resource_pools( + async def _get_resource_pools( self, - user: Optional[User] = None, + user: APIUser, server_options: Optional[ServerOptions] = None, - ) -> list[dict[str, Any]]: - headers = None - params = None - if user is not None and user.access_token is not None: - headers = {"Authorization": f"bearer {user.access_token}"} + ) -> list[ResourcePool]: + output: list[ResourcePool] = [] if server_options is not None: - max_storage: float | int = 1 - if server_options.storage is not None: - max_storage = ( - server_options.storage - if server_options.gigabytes - else round(server_options.storage / 1_000_000_000) - ) - params = { - "cpu": server_options.cpu, - "gpu": server_options.gpu, - "memory": ( - server_options.memory if server_options.gigabytes else round(server_options.memory / 1_000_000_000) - ), - "max_storage": max_storage, - } - res = requests.get(self.crc_url + "/resource_pools", headers=headers, params=params, timeout=10) - if res.status_code != 200: - raise IntermittentError( - message="The compute resource access control service sent " - "an unexpected response, please try again later", + options_gb = server_options.to_gigabytes() + output = await self.rp_repo.filter_resource_pools( + user, + cpu=options_gb.cpu, + memory=round(options_gb.memory), + max_storage=round(options_gb.storage or 1), + gpu=options_gb.gpu, ) - return cast(list[dict[str, Any]], res.json()) + else: + output = await self.rp_repo.filter_resource_pools(user) + return output @dataclass @@ -236,24 +223,26 @@ class DummyCRCValidator: options: ServerOptions = field(default_factory=lambda: ServerOptions(0.5, 1, 0, 1, "/lab", False, True)) - def validate_class_storage(self, user: User, class_id: int, storage: int | None = None) -> ServerOptions: + async def validate_class_storage(self, user: APIUser, class_id: int, storage: int | None = None) -> ServerOptions: """Validate the storage against the resource class.""" return self.options - def get_default_class(self) -> dict[str, Any]: + async def get_default_class(self) -> ResourceClass: """Get the default resource class.""" - return { - "name": "resource class", - "cpu": 0.1, - "memory": 1, - "gpu": 0, - "max_storage": 100, - "default_storage": 1, - "id": 1, - "default": True, - } - - def find_acceptable_class(self, user: User, requested_server_options: ServerOptions) -> Optional[ServerOptions]: + return ResourceClass( + name="resource class", + cpu=0.1, + memory=1, + max_storage=100, + gpu=0, + id=1, + default_storage=1, + default=True, + ) + + async def find_acceptable_class( + self, user: APIUser, requested_server_options: ServerOptions + ) -> Optional[ServerOptions]: """Find an acceptable resource class based on the required options.""" return self.options @@ -270,16 +259,16 @@ def __post_init__(self) -> None: self.service_url = self.service_url.rstrip("/") self.renku_url = self.renku_url.rstrip("/") - def get_providers(self, user: User) -> list[GitProvider]: + async def get_providers(self, user: APIUser) -> list[GitProvider]: """Get the providers for the specific user.""" if user is None or user.access_token is None: return [] - connections = self.get_oauth2_connections(user=user) + connections = await self.get_oauth2_connections(user=user) providers: dict[str, GitProvider] = dict() for c in connections: if c.provider_id in providers: continue - provider = self.get_oauth2_provider(c.provider_id) + provider = await self.get_oauth2_provider(c.provider_id) access_token_url = urljoin( self.renku_url, urlparse(f"{self.service_url}/oauth2/connections/{c.id}/token").path, @@ -305,23 +294,25 @@ def get_providers(self, user: User) -> list[GitProvider]: ) return providers_list - def get_oauth2_connections(self, user: User | None = None) -> list[OAuth2Connection]: + async def get_oauth2_connections(self, user: APIUser | None = None) -> list[OAuth2Connection]: """Get oauth2 connections.""" if user is None or user.access_token is None: return [] request_url = f"{self.service_url}/oauth2/connections" headers = {"Authorization": f"bearer {user.access_token}"} - res = requests.get(request_url, headers=headers, timeout=10) + async with httpx.AsyncClient() as client: + res = await client.get(request_url, headers=headers, timeout=10) if res.status_code != 200: raise IntermittentError(message="The data service sent an unexpected response, please try again later") connections = res.json() connections = [OAuth2Connection.from_dict(c) for c in connections if c["status"] == "connected"] return connections - def get_oauth2_provider(self, provider_id: str) -> OAuth2Provider: + async def get_oauth2_provider(self, provider_id: str) -> OAuth2Provider: """Get a specific provider.""" request_url = f"{self.service_url}/oauth2/providers/{provider_id}" - res = requests.get(request_url, timeout=10) + async with httpx.AsyncClient() as client: + res = await client.get(request_url, timeout=10) if res.status_code != 200: raise IntermittentError(message="The data service sent an unexpected response, please try again later") provider = res.json() @@ -332,6 +323,6 @@ def get_oauth2_provider(self, provider_id: str) -> OAuth2Provider: class DummyGitProviderHelper: """Helper for git providers.""" - def get_providers(self, user: User) -> list[GitProvider]: + async def get_providers(self, user: APIUser) -> list[GitProvider]: """Get a list of providers.""" return [] diff --git a/components/renku_data_services/notebooks/api/classes/image.py b/components/renku_data_services/notebooks/api/classes/image.py index 6a38aaf7f..6ced400eb 100644 --- a/components/renku_data_services/notebooks/api/classes/image.py +++ b/components/renku_data_services/notebooks/api/classes/image.py @@ -4,7 +4,7 @@ import re from dataclasses import dataclass, field from enum import Enum -from pathlib import Path +from pathlib import PurePosixPath from typing import Any, Optional, Self, cast import requests @@ -101,7 +101,7 @@ def get_image_config(self, image: "Image") -> Optional[dict[str, Any]]: return None return cast(dict[str, Any], res.json()) - def image_workdir(self, image: "Image") -> Optional[Path]: + def image_workdir(self, image: "Image") -> Optional[PurePosixPath]: """Query the docker API to get the workdir of an image.""" config = self.get_image_config(image) if config is None: @@ -112,7 +112,7 @@ def image_workdir(self, image: "Image") -> Optional[Path]: workdir = nested_config.get("WorkingDir", "/") if workdir == "": workdir = "/" - return Path(workdir) + return PurePosixPath(workdir) def with_oauth2_token(self, oauth2_token: str) -> "ImageRepoDockerAPI": """Return a docker API instance with the token as authentication.""" diff --git a/components/renku_data_services/notebooks/api/classes/k8s_client.py b/components/renku_data_services/notebooks/api/classes/k8s_client.py index 26b7f4d4b..822e301e8 100644 --- a/components/renku_data_services/notebooks/api/classes/k8s_client.py +++ b/components/renku_data_services/notebooks/api/classes/k8s_client.py @@ -1,241 +1,216 @@ -"""An abstraction over the k8s client and the k8s-watcher.""" +"""An abstraction over the kr8s kubernetes client and the k8s-watcher.""" import base64 import json -from typing import Any, Optional, cast +import logging +from contextlib import suppress +from typing import Any, Generic, Optional, TypeVar, cast from urllib.parse import urljoin -import requests -from kubernetes import client -from kubernetes.client.exceptions import ApiException -from kubernetes.client.models import V1Container, V1DeleteOptions -from kubernetes.config import load_config -from kubernetes.config.config_exception import ConfigException -from kubernetes.config.incluster_config import SERVICE_CERT_FILENAME, SERVICE_TOKEN_FILENAME, InClusterConfigLoader -from sanic.log import logger +import httpx +from kr8s import NotFoundError, ServerError +from kr8s.asyncio.objects import APIObject, Pod, Secret, StatefulSet +from kubernetes.client import ApiClient, V1Container, V1Secret -from ...errors.intermittent import ( +from renku_data_services.errors import errors +from renku_data_services.notebooks.api.classes.auth import GitlabToken, RenkuTokens +from renku_data_services.notebooks.crs import AmaltheaSessionV1Alpha1, JupyterServerV1Alpha1 +from renku_data_services.notebooks.errors.intermittent import ( CannotStartServerError, DeleteServerError, IntermittentError, JSCacheError, PatchServerError, ) -from ...errors.programming import ProgrammingError -from ...errors.user import MissingResourceError -from ...util.kubernetes_ import find_env_var -from ...util.retries import retry_with_exponential_backoff -from .auth import GitlabToken, RenkuTokens +from renku_data_services.notebooks.errors.programming import ProgrammingError +from renku_data_services.notebooks.errors.user import MissingResourceError +from renku_data_services.notebooks.util.kubernetes_ import find_env_var +from renku_data_services.notebooks.util.retries import ( + retry_with_exponential_backoff_async, +) + +sanitize_for_serialization = ApiClient().sanitize_for_serialization + + +# NOTE The type ignore below is because the kr8s library has no type stubs, they claim pyright better handles type hints +class AmaltheaSessionV1Alpha1Kr8s(APIObject): # type: ignore + """Spec for amalthea sessions used by the k8s client.""" + + kind: str = "AmaltheaSession" + version: str = "amalthea.dev/v1alpha1" + namespaced: bool = True + plural: str = "amaltheasessions" + singular: str = "amaltheasession" + scalable: bool = False + endpoint: str = "amaltheasessions" + + +# NOTE The type ignore below is because the kr8s library has no type stubs, they claim pyright better handles type hints +class JupyterServerV1Alpha1Kr8s(APIObject): # type: ignore + """Spec for jupyter servers used by the k8s client.""" + + kind: str = "JupyterServer" + version: str = "amalthea.dev/v1alpha1" + namespaced: bool = True + plural: str = "jupyterservers" + singular: str = "jupyterserver" + scalable: bool = False + endpoint: str = "jupyterservers" + +_SessionType = TypeVar("_SessionType", JupyterServerV1Alpha1, AmaltheaSessionV1Alpha1) +_Kr8sType = TypeVar("_Kr8sType", JupyterServerV1Alpha1Kr8s, AmaltheaSessionV1Alpha1Kr8s) -class NamespacedK8sClient: + +class NamespacedK8sClient(Generic[_SessionType, _Kr8sType]): """A kubernetes client that operates in a specific namespace.""" - def __init__( - self, - namespace: str, - amalthea_group: str, - amalthea_version: str, - amalthea_plural: str, - ): + def __init__(self, namespace: str, server_type: type[_SessionType], kr8s_type: type[_Kr8sType]): self.namespace = namespace - self.amalthea_group = amalthea_group - self.amalthea_version = amalthea_version - self.amalthea_plural = amalthea_plural - # NOTE: Try to load in-cluster config first, if that fails try to load kube config - try: - InClusterConfigLoader( - token_filename=SERVICE_TOKEN_FILENAME, - cert_filename=SERVICE_CERT_FILENAME, - ).load_and_set() - except ConfigException: - load_config() - self._custom_objects = client.CustomObjectsApi(client.ApiClient()) - self._custom_objects_patch = client.CustomObjectsApi(client.ApiClient()) - self._custom_objects_patch.api_client.set_default_header("Content-Type", "application/json-patch+json") - self._core_v1 = client.CoreV1Api() - self._apps_v1 = client.AppsV1Api() - - def _get_container_logs( - self, pod_name: str, container_name: str, max_log_lines: Optional[int] = None - ) -> Optional[str]: - try: - logs = cast( - str, - self._core_v1.read_namespaced_pod_log( - pod_name, - self.namespace, - container=container_name, - tail_lines=max_log_lines, - timestamps=True, - ), - ) - except ApiException as err: - if err.status in [400, 404]: - return None # container does not exist or is not ready yet - else: - raise IntermittentError(f"Logs cannot be read for pod {pod_name}, container {container_name}.") - else: - return logs - - def get_pod_logs(self, name: str, containers: list[str], max_log_lines: Optional[int] = None) -> dict[str, str]: + self.server_type: type[_SessionType] = server_type + self._kr8s_type: type[_Kr8sType] = kr8s_type + if (self.server_type == AmaltheaSessionV1Alpha1 and self._kr8s_type == JupyterServerV1Alpha1Kr8s) or ( + self.server_type == JupyterServerV1Alpha1 and self._kr8s_type == AmaltheaSessionV1Alpha1Kr8s + ): + raise errors.ProgrammingError(message="Incompatible manifest and client types in k8s client") + self.sanitize = ApiClient().sanitize_for_serialization + + async def get_pod_logs(self, name: str, max_log_lines: Optional[int] = None) -> dict[str, str]: """Get the logs of all containers in the session.""" - output = {} + pod = cast(Pod, await Pod.get(name=name, namespace=self.namespace)) + logs: dict[str, str] = {} + containers = [i.name for i in pod.spec.containers] + [i.name for i in pod.spec.initContainers] for container in containers: - logs = self._get_container_logs(pod_name=name, container_name=container, max_log_lines=max_log_lines) - if logs: - output[container] = logs - return output + try: + # NOTE: calling pod.logs without a container name set crashes the library + clogs: list[str] = [i async for i in pod.logs(container=container, tail_lines=max_log_lines)] + except NotFoundError: + raise errors.MissingResourceError(message=f"The session pod {name} does not exist.") + except ServerError as err: + if err.status == 404: + raise errors.MissingResourceError(message=f"The session pod {name} does not exist.") + raise + else: + logs[container] = "\n".join(clogs) + return logs - def get_secret(self, name: str) -> Optional[dict[str, Any]]: + async def get_secret(self, name: str) -> Secret | None: """Read a specific secret from the cluster.""" try: - secret = cast(dict[str, Any], self._core_v1.read_namespaced_secret(name, self.namespace)) - except client.rest.ApiException: + secret = await Secret.get(name, self.namespace) + except NotFoundError: return None return secret - def create_server(self, manifest: dict[str, Any]) -> dict[str, Any]: + async def create_server(self, manifest: _SessionType) -> _SessionType: """Create a jupyter server in the cluster.""" - server_name = manifest.get("metadata", {}).get("name") + # NOTE: You have to exclude none when using model dump below because otherwise we get + # namespace=null which seems to break the kr8s client or simply k8s does not translate + # namespace = null to the default namespace. + manifest.metadata.namespace = self.namespace + js = await self._kr8s_type(manifest.model_dump(exclude_none=True, mode="json")) + server_name = manifest.metadata.name try: - self._custom_objects.create_namespaced_custom_object( - group=self.amalthea_group, - version=self.amalthea_version, - namespace=self.namespace, - plural=self.amalthea_plural, - body=manifest, - ) - except ApiException as e: - logger.exception(f"Cannot start server {server_name} because of {e}") + await js.create() + except ServerError as e: + logging.exception(f"Cannot start server {server_name} because of {e}") raise CannotStartServerError( message=f"Cannot start the session {server_name}", ) + # NOTE: If refresh is not called then upon creating the object the status is blank + await js.refresh() # NOTE: We wait for the cache to sync with the newly created server # If not then the user will get a non-null response from the POST request but # then immediately after a null response because the newly created server has # not made it into the cache. With this we wait for the cache to catch up # before we send the response from the POST request out. Exponential backoff # is used to avoid overwhelming the cache. - server = retry_with_exponential_backoff(lambda x: x is None)(self.get_server)(server_name) + server = await retry_with_exponential_backoff_async(lambda x: x is None)(self.get_server)(server_name) if server is None: raise CannotStartServerError(message=f"Cannot start the session {server_name}") return server - def patch_server(self, server_name: str, patch: dict[str, Any] | list[dict[str, Any]]) -> dict[str, Any]: + async def patch_server(self, server_name: str, patch: dict[str, Any] | list[dict[str, Any]]) -> _SessionType: """Patch the server.""" + server = await self._kr8s_type(dict(metadata=dict(name=server_name, namespace=self.namespace))) + patch_type: str | None = None # rfc7386 patch + if isinstance(patch, list): + patch_type = "json" # rfc6902 patch try: - if isinstance(patch, list): # noqa: SIM108 - # NOTE: The _custom_objects_patch will only accept rfc6902 json-patch. - # We can recognize the type of patch because this is the only one that uses a list - client = self._custom_objects_patch - else: - # NOTE: The _custom_objects will accept the usual rfc7386 merge patches - client = self._custom_objects - - server = cast( - dict[str, Any], - client.patch_namespaced_custom_object( - group=self.amalthea_group, - version=self.amalthea_version, - namespace=self.namespace, - plural=self.amalthea_plural, - name=server_name, - body=patch, - ), - ) - - except ApiException as e: - logger.exception(f"Cannot patch server {server_name} because of {e}") + await server.patch(patch, type=patch_type) + except ServerError as e: + logging.exception(f"Cannot patch server {server_name} because of {e}") raise PatchServerError() - return server + return self.server_type.model_validate(server.to_dict()) - def patch_statefulset( - self, server_name: str, patch: dict[str, Any] | list[dict[str, Any]] | client.V1StatefulSet - ) -> client.V1StatefulSet | None: + async def patch_statefulset( + self, server_name: str, patch: dict[str, Any] | list[dict[str, Any]] + ) -> StatefulSet | None: """Patch a statefulset.""" + sts = await StatefulSet(dict(metadata=dict(name=server_name, namespace=self.namespace))) + patch_type: str | None = None # rfc7386 patch + if isinstance(patch, list): + patch_type = "json" # rfc6902 patch try: - ss = self._apps_v1.patch_namespaced_stateful_set( - server_name, - self.namespace, - patch, - ) - except ApiException as err: + await sts.patch(patch, type=patch_type) + except ServerError as err: if err.status == 404: # NOTE: It can happen potentially that another request or something else # deleted the session as this request was going on, in this case we ignore # the missing statefulset return None raise - return ss + return sts - def delete_server(self, server_name: str, forced: bool = False) -> Any: + async def delete_server(self, server_name: str) -> None: """Delete the server.""" + server = await self._kr8s_type(dict(metadata=dict(name=server_name, namespace=self.namespace))) try: - status = self._custom_objects.delete_namespaced_custom_object( - group=self.amalthea_group, - version=self.amalthea_version, - namespace=self.namespace, - plural=self.amalthea_plural, - name=server_name, - grace_period_seconds=0 if forced else None, - body=V1DeleteOptions(propagation_policy="Foreground"), - ) - except ApiException as e: - logger.exception(f"Cannot delete server {server_name} because of {e}") + await server.delete(propagation_policy="Foreground") + except ServerError as e: + logging.exception(f"Cannot delete server {server_name} because of {e}") raise DeleteServerError() - return status + return None - def get_server(self, name: str) -> Optional[dict[str, Any]]: + async def get_server(self, name: str) -> _SessionType | None: """Get a specific JupyterServer object.""" try: - js = cast( - dict[str, Any], - self._custom_objects.get_namespaced_custom_object( - name=name, - group=self.amalthea_group, - version=self.amalthea_version, - namespace=self.namespace, - plural=self.amalthea_plural, - ), - ) - except ApiException as err: + server = await self._kr8s_type.get(name=name, namespace=self.namespace) + except NotFoundError: + return None + except ServerError as err: if err.status not in [400, 404]: - logger.exception(f"Cannot get server {name} because of {err}") + logging.exception(f"Cannot get server {name} because of {err}") raise IntermittentError(f"Cannot get server {name} from the k8s API.") return None - return js + return self.server_type.model_validate(server.to_dict()) - def list_servers(self, label_selector: Optional[str] = None) -> list[dict[str, Any]]: + async def list_servers(self, label_selector: Optional[str] = None) -> list[_SessionType]: """Get a list of k8s jupyterserver objects for a specific user.""" try: - jss = self._custom_objects.list_namespaced_custom_object( - group=self.amalthea_group, - version=self.amalthea_version, - namespace=self.namespace, - plural=self.amalthea_plural, - label_selector=label_selector, - ) - except ApiException as err: + servers = await self._kr8s_type.list(namespace=self.namespace, label_selector=label_selector) + except ServerError as err: if err.status not in [400, 404]: - logger.exception(f"Cannot list servers because of {err}") + logging.exception(f"Cannot list servers because of {err}") raise IntermittentError(f"Cannot list servers from the k8s API with selector {label_selector}.") return [] - return cast(list[dict[str, Any]], jss.get("items", [])) + output: list[_SessionType] = ( + [self.server_type.model_validate(servers.to_dict())] + if isinstance(servers, APIObject) + else [self.server_type.model_validate(server.to_dict()) for server in servers] + ) + return output - def patch_image_pull_secret(self, server_name: str, gitlab_token: GitlabToken) -> None: + async def patch_image_pull_secret(self, server_name: str, gitlab_token: GitlabToken) -> None: """Patch the image pull secret used in a Renku session.""" secret_name = f"{server_name}-image-secret" try: - secret = self._core_v1.read_namespaced_secret(secret_name, self.namespace) - except ApiException as err: - if err.status == 404: - # NOTE: In many cases the session does not have an image pull secret - # this happens when the repo for the project is public so images are public - return - raise - old_docker_config = json.loads(base64.b64decode(secret.data[".dockerconfigjson"]).decode()) + secret = cast(Secret, await Secret.get(name=secret_name, namespace=self.namespace)) + except NotFoundError: + return None + secret_data = secret.data.to_dict() + old_docker_config = json.loads(base64.b64decode(secret_data[".dockerconfigjson"]).decode()) hostname = next(iter(old_docker_config["auths"].keys()), None) if not hostname: raise ProgrammingError( @@ -259,26 +234,17 @@ def patch_image_pull_secret(self, server_name: str, gitlab_token: GitlabToken) - "value": base64.b64encode(json.dumps(new_docker_config).encode()).decode(), } ] - self._core_v1.patch_namespaced_secret( - secret_name, - self.namespace, - patch, - ) + await secret.patch(patch, type="json") - def patch_statefulset_tokens(self, name: str, renku_tokens: RenkuTokens) -> None: + async def patch_statefulset_tokens(self, name: str, renku_tokens: RenkuTokens) -> None: """Patch the Renku and Gitlab access tokens that are used in the session statefulset.""" try: - sts = self._apps_v1.read_namespaced_stateful_set(name, self.namespace) - except ApiException as err: - if err.status == 404: - # NOTE: It can happen potentially that another request or something else - # deleted the session as this request was going on, in this case we ignore - # the missing statefulset - return - raise + sts = cast(StatefulSet, await StatefulSet.get(name=name, namespace=self.namespace)) + except NotFoundError: + return None - containers: list[V1Container] = sts.spec.template.spec.containers - init_containers: list[V1Container] = sts.spec.template.spec.init_containers + containers: list[V1Container] = [V1Container(**i) for i in sts.spec.template.spec.containers] + init_containers: list[V1Container] = [V1Container(**i) for i in sts.spec.template.spec.init_containers] git_proxy_container_index, git_proxy_container = next( ((i, c) for i, c in enumerate(containers) if c.name == "git-proxy"), @@ -359,50 +325,61 @@ def patch_statefulset_tokens(self, name: str, renku_tokens: RenkuTokens) -> None ) if not patches: - return + return None - self._apps_v1.patch_namespaced_stateful_set( - name, - self.namespace, - patches, - ) + await sts.patch(patches, type="json") + async def create_secret(self, secret: V1Secret) -> V1Secret: + """Create a new secret.""" -class JsServerCache: + new_secret = await Secret(self.sanitize(secret), self.namespace) + await new_secret.create() + return V1Secret(metadata=new_secret.metadata, data=new_secret.data, type=new_secret.raw.get("type")) + + async def delete_secret(self, name: str) -> None: + """Delete a secret.""" + secret = await Secret(dict(metadata=dict(name=name, namespace=self.namespace))) + with suppress(NotFoundError): + await secret.delete() + return None + + +class ServerCache(Generic[_SessionType]): """Utility class for calling the jupyter server cache.""" - def __init__(self, url: str): + def __init__(self, url: str, server_type: type[_SessionType]): self.url = url + self.client = httpx.AsyncClient() + self.server_type: type[_SessionType] = server_type - def list_servers(self, safe_username: str) -> list[dict[str, Any]]: + async def list_servers(self, safe_username: str) -> list[_SessionType]: """List the jupyter servers.""" url = urljoin(self.url, f"/users/{safe_username}/servers") try: - res = requests.get(url, timeout=10) - res.raise_for_status() - except requests.HTTPError as err: - logger.warning( + res = await self.client.get(url, timeout=10) + except httpx.RequestError as err: + logging.warning(f"Jupyter server cache at {url} cannot be reached: {err}") + raise JSCacheError("The jupyter server cache is not available") + if res.status_code != 200: + logging.warning( f"Listing servers at {url} from " f"jupyter server cache failed with status code: {res.status_code} " - f"and error: {err}" + f"and body: {res.text}" ) - raise JSCacheError(f"The JSCache produced an unexpected status code: {err}") from err - except requests.RequestException as err: - logger.warning(f"Jupyter server cache at {url} cannot be reached: {err}") - raise JSCacheError("The jupyter server cache is not available") from err + raise JSCacheError(f"The JSCache produced an unexpected status code: {res.status_code}") - return cast(list[dict[str, Any]], res.json()) + return [self.server_type.model_validate(i) for i in res.json()] - def get_server(self, name: str) -> Optional[dict[str, Any]]: + async def get_server(self, name: str) -> _SessionType | None: """Get a specific jupyter server.""" url = urljoin(self.url, f"/servers/{name}") try: - res = requests.get(url, timeout=10) - except requests.exceptions.RequestException as err: - logger.warning(f"Jupyter server cache at {url} cannot be reached: {err}") + res = await self.client.get(url, timeout=10) + except httpx.RequestError as err: + logging.warning(f"Jupyter server cache at {url} cannot be reached: {err}") raise JSCacheError("The jupyter server cache is not available") if res.status_code != 200: - logger.warning( + logging.warning( f"Reading server at {url} from " f"jupyter server cache failed with status code: {res.status_code} " f"and body: {res.text}" @@ -413,163 +390,116 @@ def get_server(self, name: str) -> Optional[dict[str, Any]]: return None if len(output) > 1: raise ProgrammingError(f"Expected to find 1 server when getting server {name}, " f"found {len(output)}.") - return cast(dict[str, Any], output[0]) + return self.server_type.model_validate(output[0]) -class K8sClient: +class K8sClient(Generic[_SessionType, _Kr8sType]): """The K8s client that combines a namespaced client and a jupyter server cache.""" def __init__( self, - js_cache: JsServerCache, - renku_ns_client: NamespacedK8sClient, + cache: ServerCache[_SessionType], + renku_ns_client: NamespacedK8sClient[_SessionType, _Kr8sType], username_label: str, - session_ns_client: Optional[NamespacedK8sClient] = None, ): - self.js_cache = js_cache - self.renku_ns_client = renku_ns_client + self.cache: ServerCache[_SessionType] = cache + self.renku_ns_client: NamespacedK8sClient[_SessionType, _Kr8sType] = renku_ns_client self.username_label = username_label - self.session_ns_client = session_ns_client if not self.username_label: raise ProgrammingError("username_label has to be provided to K8sClient") + self.sanitize = self.renku_ns_client.sanitize - def list_servers(self, safe_username: str) -> list[dict[str, Any]]: + async def list_servers(self, safe_username: str) -> list[_SessionType]: """Get a list of servers that belong to a user. Attempt to use the cache first but if the cache fails then use the k8s API. """ try: - return self.js_cache.list_servers(safe_username) + return await self.cache.list_servers(safe_username) except JSCacheError: - logger.warning(f"Skipping the cache to list servers for user: {safe_username}") + logging.warning(f"Skipping the cache to list servers for user: {safe_username}") label_selector = f"{self.username_label}={safe_username}" - return self.renku_ns_client.list_servers(label_selector) + ( - self.session_ns_client.list_servers(label_selector) if self.session_ns_client is not None else [] - ) + return await self.renku_ns_client.list_servers(label_selector) - def get_server(self, name: str, safe_username: str) -> Optional[dict[str, Any]]: + async def get_server(self, name: str, safe_username: str) -> _SessionType | None: """Attempt to get a specific server by name from the cache. If the request to the cache fails, fallback to the k8s API. """ server = None try: - server = self.js_cache.get_server(name) + server = await self.cache.get_server(name) except JSCacheError: - output = [] - res = None - if self.session_ns_client is not None: - res = self.session_ns_client.get_server(name) - if res: - output.append(res) - res = self.renku_ns_client.get_server(name) - if res: - output.append(res) - if len(output) > 1: - raise ProgrammingError( - "Expected less than two results for searching for " f"server {name}, but got {len(output)}" - ) - if len(output) == 0: - return None - server = output[0] + server = await self.renku_ns_client.get_server(name) - if server and server.get("metadata", {}).get("labels", {}).get(self.username_label) != safe_username: + if server and server.metadata and server.metadata.labels.get(self.username_label) != safe_username: return None return server - def get_server_logs( + async def get_server_logs( self, server_name: str, safe_username: str, max_log_lines: Optional[int] = None ) -> dict[str, str]: """Get the logs from the server.""" - server = self.get_server(server_name, safe_username) - if server is None: - raise MissingResourceError( - f"Cannot find server {server_name} for user {safe_username} to read the logs from." - ) - containers = list(server.get("status", {}).get("containerStates", {}).get("init", {}).keys()) + list( - server.get("status", {}).get("containerStates", {}).get("regular", {}).keys() - ) - namespace = server.get("metadata", {}).get("namespace") + # NOTE: this get_server ensures the user has access to the server without it you could read someone elses logs + _ = await self.get_server(server_name, safe_username) pod_name = f"{server_name}-0" - if namespace == self.renku_ns_client.namespace: - return self.renku_ns_client.get_pod_logs(pod_name, containers, max_log_lines) - if self.session_ns_client is None: - raise MissingResourceError( - f"Cannot find server {server_name} for user {safe_username} to read the logs from." - ) - return self.session_ns_client.get_pod_logs(pod_name, containers, max_log_lines) + return await self.renku_ns_client.get_pod_logs(pod_name, max_log_lines) - def get_secret(self, name: str) -> Optional[dict[str, Any]]: + async def _get_secret(self, name: str) -> Secret | None: """Get a specific secret.""" - if self.session_ns_client is not None: - secret = self.session_ns_client.get_secret(name) - if secret: - return secret - return self.renku_ns_client.get_secret(name) + return await self.renku_ns_client.get_secret(name) - def create_server(self, manifest: dict[str, Any], safe_username: str) -> dict[str, Any]: + async def create_server(self, manifest: _SessionType, safe_username: str) -> _SessionType: """Create a server.""" - server_name = manifest.get("metadata", {}).get("name") - server = self.get_server(server_name, safe_username) + server_name = manifest.metadata.name + server = await self.get_server(server_name, safe_username) if server: # NOTE: server already exists return server - if not self.session_ns_client: - return self.renku_ns_client.create_server(manifest) - return self.session_ns_client.create_server(manifest) + manifest.metadata.labels[self.username_label] = safe_username + return await self.renku_ns_client.create_server(manifest) - def patch_server( + async def patch_server( self, server_name: str, safe_username: str, patch: dict[str, Any] | list[dict[str, Any]] - ) -> dict[str, Any]: + ) -> _SessionType: """Patch a server.""" - server = self.get_server(server_name, safe_username) + server = await self.get_server(server_name, safe_username) if not server: raise MissingResourceError( f"Cannot find server {server_name} for user " f"{safe_username} in order to patch it." ) + return await self.renku_ns_client.patch_server(server_name=server_name, patch=patch) - namespace = server.get("metadata", {}).get("namespace") - - if namespace == self.renku_ns_client.namespace: - return self.renku_ns_client.patch_server(server_name=server_name, patch=patch) - if self.session_ns_client is None: - raise MissingResourceError( - f"Cannot find server {server_name} for user " f"{safe_username} in order to patch it." - ) - return self.session_ns_client.patch_server(server_name=server_name, patch=patch) - - def patch_statefulset( + async def patch_statefulset( self, server_name: str, patch: dict[str, Any] | list[dict[str, Any]] - ) -> client.V1StatefulSet | None: + ) -> StatefulSet | None: """Patch a statefulset.""" - client = self.session_ns_client if self.session_ns_client else self.renku_ns_client - return client.patch_statefulset(server_name=server_name, patch=patch) + client = self.renku_ns_client + return await client.patch_statefulset(server_name=server_name, patch=patch) - def delete_server(self, server_name: str, safe_username: str, forced: bool = False) -> None: + async def delete_server(self, server_name: str, safe_username: str) -> None: """Delete the server.""" - server = self.get_server(server_name, safe_username) + server = await self.get_server(server_name, safe_username) if not server: - raise MissingResourceError( - f"Cannot find server {server_name} for user " f"{safe_username} in order to delete it." - ) - namespace = server.get("metadata", {}).get("namespace") - if namespace == self.renku_ns_client.namespace: - self.renku_ns_client.delete_server(server_name, forced) - if self.session_ns_client is None: - raise MissingResourceError( - f"Cannot find server {server_name} for user " f"{safe_username} in order to delete it." - ) - self.session_ns_client.delete_server(server_name, forced) + return None + await self.renku_ns_client.delete_server(server_name) + return None - def patch_tokens(self, server_name: str, renku_tokens: RenkuTokens, gitlab_token: GitlabToken) -> None: + async def patch_tokens(self, server_name: str, renku_tokens: RenkuTokens, gitlab_token: GitlabToken) -> None: """Patch the Renku and Gitlab access tokens used in a session.""" - client = self.session_ns_client if self.session_ns_client else self.renku_ns_client - client.patch_statefulset_tokens(server_name, renku_tokens) - client.patch_image_pull_secret(server_name, gitlab_token) + client = self.renku_ns_client + await client.patch_statefulset_tokens(server_name, renku_tokens) + await client.patch_image_pull_secret(server_name, gitlab_token) @property def preferred_namespace(self) -> str: """Get the preferred namespace for creating jupyter servers.""" - if self.session_ns_client is not None: - return self.session_ns_client.namespace return self.renku_ns_client.namespace + + async def create_secret(self, secret: V1Secret) -> V1Secret: + """Create a secret.""" + return await self.renku_ns_client.create_secret(secret) + + async def delete_secret(self, name: str) -> None: + """Delete a secret.""" + return await self.renku_ns_client.delete_secret(name) diff --git a/components/renku_data_services/notebooks/api/classes/server.py b/components/renku_data_services/notebooks/api/classes/server.py index 50b1de0f4..9b430c943 100644 --- a/components/renku_data_services/notebooks/api/classes/server.py +++ b/components/renku_data_services/notebooks/api/classes/server.py @@ -3,29 +3,32 @@ from abc import ABC from collections.abc import Sequence from itertools import chain -from pathlib import Path +from pathlib import PurePosixPath from typing import Any from urllib.parse import urljoin, urlparse from sanic.log import logger -from ...config import _NotebooksConfig -from ...errors.programming import ConfigurationError, DuplicateEnvironmentVariableError -from ...errors.user import MissingResourceError -from ..amalthea_patches import cloudstorage as cloudstorage_patches -from ..amalthea_patches import general as general_patches -from ..amalthea_patches import git_proxy as git_proxy_patches -from ..amalthea_patches import git_sidecar as git_sidecar_patches -from ..amalthea_patches import init_containers as init_containers_patches -from ..amalthea_patches import inject_certificates as inject_certificates_patches -from ..amalthea_patches import jupyter_server as jupyter_server_patches -from ..amalthea_patches import ssh as ssh_patches -from ..schemas.secrets import K8sUserSecrets -from ..schemas.server_options import ServerOptions -from .cloud_storage import ICloudStorageRequest -from .k8s_client import K8sClient -from .repository import GitProvider, Repository -from .user import AnonymousUser, RegisteredUser +from renku_data_services.base_models import AnonymousAPIUser, AuthenticatedAPIUser +from renku_data_services.base_models.core import APIUser +from renku_data_services.notebooks.api.amalthea_patches import cloudstorage as cloudstorage_patches +from renku_data_services.notebooks.api.amalthea_patches import general as general_patches +from renku_data_services.notebooks.api.amalthea_patches import git_proxy as git_proxy_patches +from renku_data_services.notebooks.api.amalthea_patches import git_sidecar as git_sidecar_patches +from renku_data_services.notebooks.api.amalthea_patches import init_containers as init_containers_patches +from renku_data_services.notebooks.api.amalthea_patches import inject_certificates as inject_certificates_patches +from renku_data_services.notebooks.api.amalthea_patches import jupyter_server as jupyter_server_patches +from renku_data_services.notebooks.api.amalthea_patches import ssh as ssh_patches +from renku_data_services.notebooks.api.classes.cloud_storage import ICloudStorageRequest +from renku_data_services.notebooks.api.classes.k8s_client import JupyterServerV1Alpha1Kr8s, K8sClient +from renku_data_services.notebooks.api.classes.repository import GitProvider, Repository +from renku_data_services.notebooks.api.classes.user import NotebooksGitlabClient +from renku_data_services.notebooks.api.schemas.secrets import K8sUserSecrets +from renku_data_services.notebooks.api.schemas.server_options import ServerOptions +from renku_data_services.notebooks.config import _NotebooksConfig +from renku_data_services.notebooks.crs import JupyterServerV1Alpha1 +from renku_data_services.notebooks.errors.programming import DuplicateEnvironmentVariableError +from renku_data_services.notebooks.errors.user import MissingResourceError class UserServer(ABC): @@ -33,7 +36,7 @@ class UserServer(ABC): def __init__( self, - user: AnonymousUser | RegisteredUser, + user: AnonymousAPIUser | AuthenticatedAPIUser, server_name: str, image: str | None, server_options: ServerOptions, @@ -41,18 +44,18 @@ def __init__( user_secrets: K8sUserSecrets | None, cloudstorage: Sequence[ICloudStorageRequest], k8s_client: K8sClient, - workspace_mount_path: Path, - work_dir: Path, + workspace_mount_path: PurePosixPath, + work_dir: PurePosixPath, config: _NotebooksConfig, + internal_gitlab_user: APIUser, using_default_image: bool = False, is_image_private: bool = False, repositories: list[Repository] | None = None, ): - self._check_flask_config() self._user = user self.server_name = server_name - self._k8s_client: K8sClient = k8s_client - self.safe_username = self._user.safe_username + self._k8s_client: K8sClient[JupyterServerV1Alpha1, JupyterServerV1Alpha1Kr8s] = k8s_client + self.safe_username = self._user.id self.image = image self.server_options = server_options self.environment_variables = environment_variables @@ -63,13 +66,14 @@ def __init__( self.cloudstorage = cloudstorage self.is_image_private = is_image_private self.config = config + self.internal_gitlab_user = internal_gitlab_user if self.server_options.idle_threshold_seconds is not None: self.idle_seconds_threshold = self.server_options.idle_threshold_seconds else: self.idle_seconds_threshold = ( config.sessions.culling.registered.idle_seconds - if isinstance(self._user, RegisteredUser) + if isinstance(self._user, AuthenticatedAPIUser) else config.sessions.culling.anonymous.idle_seconds ) @@ -78,7 +82,7 @@ def __init__( else: self.hibernated_seconds_threshold = ( config.sessions.culling.registered.hibernated_seconds - if isinstance(user, RegisteredUser) + if isinstance(user, AuthenticatedAPIUser) else config.sessions.culling.anonymous.hibernated_seconds ) self._repositories: list[Repository] = repositories or [] @@ -86,7 +90,7 @@ def __init__( self._has_configured_git_providers = False @property - def user(self) -> AnonymousUser | RegisteredUser: + def user(self) -> AnonymousAPIUser | AuthenticatedAPIUser: """Getter for server's user.""" return self._user @@ -95,14 +99,14 @@ def k8s_client(self) -> K8sClient: """Return server's k8s client.""" return self._k8s_client - @property - def repositories(self) -> list[Repository]: + async def repositories(self) -> list[Repository]: """Get the list of repositories in the project.""" # Configure git repository providers based on matching URLs. if not self._has_configured_git_providers: + git_providers = await self.git_providers() for repo in self._repositories: found_provider = None - for provider in self.git_providers: + for provider in git_providers: if urlparse(provider.url).netloc == urlparse(repo.url).netloc: found_provider = provider break @@ -115,33 +119,33 @@ def repositories(self) -> list[Repository]: @property def server_url(self) -> str: """The URL where a user can access their session.""" - if type(self._user) is RegisteredUser: + if self._user.is_authenticated: return urljoin( f"https://{self.config.sessions.ingress.host}", f"sessions/{self.server_name}", ) return urljoin( f"https://{self.config.sessions.ingress.host}", - f"sessions/{self.server_name}?token={self._user.username}", + f"sessions/{self.server_name}?token={self._user.id}", ) - @property - def git_providers(self) -> list[GitProvider]: + async def git_providers(self) -> list[GitProvider]: """The list of git providers.""" if self._git_providers is None: - self._git_providers = self.config.git_provider_helper.get_providers(user=self.user) + self._git_providers = await self.config.git_provider_helper.get_providers(user=self.user) return self._git_providers - @property - def required_git_providers(self) -> list[GitProvider]: + async def required_git_providers(self) -> list[GitProvider]: """The list of required git providers.""" - required_provider_ids: set[str] = set(r.provider for r in self.repositories if r.provider) - return [p for p in self.git_providers if p.id in required_provider_ids] + repositories = await self.repositories() + required_provider_ids: set[str] = set(r.provider for r in repositories if r.provider) + providers = await self.git_providers() + return [p for p in providers if p.id in required_provider_ids] def __str__(self) -> str: - return f"" + return f"" - def start(self) -> dict[str, Any] | None: + async def start(self) -> JupyterServerV1Alpha1 | None: """Create the jupyterserver resource in k8s.""" errors = self._get_start_errors() if errors: @@ -151,19 +155,8 @@ def start(self) -> dict[str, Any] | None: f"or Docker resources are missing: {', '.join(errors)}" ) ) - return self._k8s_client.create_server(self._get_session_manifest(), self.safe_username) - - def _check_flask_config(self) -> None: - """Check the app config and ensure minimum required parameters are present.""" - if self.config.git.url is None: - raise ConfigurationError( - message="The gitlab URL is missing, it must be provided in an environment variable called GITLAB_URL" - ) - if self.config.git.registry is None: - raise ConfigurationError( - message="The url to the docker image registry is missing, it must be provided in " - "an environment variable called IMAGE_REGISTRY" - ) + manifest = JupyterServerV1Alpha1.model_validate(await self._get_session_manifest()) + return await self._k8s_client.create_server(manifest, self.safe_username) @staticmethod def _check_environment_variables_overrides(patches_list: list[dict[str, Any]]) -> None: @@ -199,9 +192,9 @@ def _get_start_errors(self) -> list[str]: errors.append(f"image {self.image} does not exist or cannot be accessed") return errors - def _get_session_manifest(self) -> dict[str, Any]: + async def _get_session_manifest(self) -> dict[str, Any]: """Compose the body of the user session for the k8s operator.""" - patches = self._get_patches() + patches = await self._get_patches() self._check_environment_variables_overrides(patches) # Storage @@ -211,7 +204,7 @@ def _get_session_manifest(self) -> dict[str, Any]: "pvc": { "enabled": True, "storageClassName": self.config.sessions.storage.pvs_storage_class, - "mountPath": self.workspace_mount_path.absolute().as_posix(), + "mountPath": self.workspace_mount_path.as_posix(), }, } else: @@ -220,24 +213,24 @@ def _get_session_manifest(self) -> dict[str, Any]: "size": storage_size, "pvc": { "enabled": False, - "mountPath": self.workspace_mount_path.absolute().as_posix(), + "mountPath": self.workspace_mount_path.as_posix(), }, } # Authentication - if isinstance(self._user, RegisteredUser): + if isinstance(self._user, AuthenticatedAPIUser): session_auth = { "token": "", "oidc": { "enabled": True, "clientId": self.config.sessions.oidc.client_id, "clientSecret": {"value": self.config.sessions.oidc.client_secret}, - "issuerUrl": self._user.oidc_issuer, + "issuerUrl": self.config.sessions.oidc.issuer_url, "authorizedEmails": [self._user.email], }, } else: session_auth = { - "token": self._user.username, + "token": self._user.id, "oidc": {"enabled": False}, } # Combine everything into the manifest @@ -255,7 +248,7 @@ def _get_session_manifest(self) -> dict[str, Any]: "idleSecondsThreshold": self.idle_seconds_threshold, "maxAgeSecondsThreshold": ( self.config.sessions.culling.registered.max_age_seconds - if isinstance(self._user, RegisteredUser) + if isinstance(self._user, AuthenticatedAPIUser) else self.config.sessions.culling.anonymous.max_age_seconds ), "hibernatedSecondsThreshold": self.hibernated_seconds_threshold, @@ -263,7 +256,7 @@ def _get_session_manifest(self) -> dict[str, Any]: "jupyterServer": { "defaultUrl": self.server_options.default_url, "image": self.image, - "rootDir": self.work_dir.absolute().as_posix(), + "rootDir": self.work_dir.as_posix(), "resources": self.server_options.to_k8s_resources( enforce_cpu_limits=self.config.sessions.enforce_cpu_limits ), @@ -286,7 +279,7 @@ def _get_session_manifest(self) -> dict[str, Any]: def _get_renku_annotation_prefix(self) -> str: return self.config.session_get_endpoint_annotations.renku_annotation_prefix - def _get_patches(self) -> list[dict[str, Any]]: + async def _get_patches(self) -> list[dict[str, Any]]: return list( chain( general_patches.test(self), @@ -297,25 +290,25 @@ def _get_patches(self) -> list[dict[str, Any]]: general_patches.dev_shm(self), jupyter_server_patches.args(), jupyter_server_patches.env(self), - jupyter_server_patches.image_pull_secret(self), + jupyter_server_patches.image_pull_secret(self, self.internal_gitlab_user.access_token), jupyter_server_patches.disable_service_links(), jupyter_server_patches.rstudio_env_variables(self), jupyter_server_patches.user_secrets(self), - git_proxy_patches.main(self), - git_sidecar_patches.main(self), + await git_proxy_patches.main(self), + await git_sidecar_patches.main(self), general_patches.oidc_unverified_email(self), ssh_patches.main(self.config), # init container for certs must come before all other init containers # so that it runs first before all other init containers init_containers_patches.certificates(self.config), init_containers_patches.download_image(self), - init_containers_patches.git_clone(self), + await init_containers_patches.git_clone(self), inject_certificates_patches.proxy(self), # Cloud Storage needs to patch the git clone sidecar spec and so should come after # the sidecars # WARN: this patch depends on the index of the sidecar and so needs to be updated # if sidercars are added or removed - cloudstorage_patches.main(self), + await cloudstorage_patches.main(self), ) ) @@ -339,8 +332,8 @@ def get_annotations(self) -> dict[str, str | None]: annotations = { f"{prefix}commit-sha": None, f"{prefix}gitlabProjectId": None, - f"{prefix}safe-username": self._user.safe_username, - f"{prefix}username": self._user.username, + f"{prefix}safe-username": self._user.id, + f"{prefix}username": self._user.id, f"{prefix}userId": self._user.id, f"{prefix}servername": self.server_name, f"{prefix}branch": None, @@ -369,7 +362,7 @@ class Renku1UserServer(UserServer): def __init__( self, - user: AnonymousUser | RegisteredUser, + user: AnonymousAPIUser | AuthenticatedAPIUser, server_name: str, namespace: str, project: str, @@ -382,14 +375,18 @@ def __init__( user_secrets: K8sUserSecrets | None, cloudstorage: Sequence[ICloudStorageRequest], k8s_client: K8sClient, - workspace_mount_path: Path, - work_dir: Path, + workspace_mount_path: PurePosixPath, + work_dir: PurePosixPath, config: _NotebooksConfig, + gitlab_client: NotebooksGitlabClient, + internal_gitlab_user: APIUser, using_default_image: bool = False, is_image_private: bool = False, ): + self.gitlab_client = gitlab_client + self.internal_gitlab_user = internal_gitlab_user gitlab_project_name = f"{namespace}/{project}" - gitlab_project = user.get_renku_project(gitlab_project_name) + gitlab_project = self.gitlab_client.get_renku_project(gitlab_project_name) single_repository = ( Repository( url=gitlab_project.http_url_to_repo, @@ -416,6 +413,7 @@ def __init__( is_image_private=is_image_private, repositories=[single_repository] if single_repository is not None else [], config=config, + internal_gitlab_user=internal_gitlab_user, ) self.namespace = namespace @@ -494,7 +492,7 @@ class Renku2UserServer(UserServer): def __init__( self, - user: AnonymousUser | RegisteredUser, + user: AnonymousAPIUser | AuthenticatedAPIUser, image: str, project_id: str, launcher_id: str, @@ -504,10 +502,11 @@ def __init__( user_secrets: K8sUserSecrets | None, cloudstorage: Sequence[ICloudStorageRequest], k8s_client: K8sClient, - workspace_mount_path: Path, - work_dir: Path, + workspace_mount_path: PurePosixPath, + work_dir: PurePosixPath, repositories: list[Repository], config: _NotebooksConfig, + internal_gitlab_user: APIUser, using_default_image: bool = False, is_image_private: bool = False, ): @@ -526,6 +525,7 @@ def __init__( is_image_private=is_image_private, repositories=repositories, config=config, + internal_gitlab_user=internal_gitlab_user, ) self.project_id = project_id diff --git a/components/renku_data_services/notebooks/api/classes/server_manifest.py b/components/renku_data_services/notebooks/api/classes/server_manifest.py index 0a220b430..3987f4fba 100644 --- a/components/renku_data_services/notebooks/api/classes/server_manifest.py +++ b/components/renku_data_services/notebooks/api/classes/server_manifest.py @@ -4,13 +4,15 @@ import json from typing import Any, Optional, cast -from .cloud_storage.existing import ExistingCloudStorage +from renku_data_services.errors import errors +from renku_data_services.notebooks.api.classes.cloud_storage.existing import ExistingCloudStorage +from renku_data_services.notebooks.crs import JupyterServerV1Alpha1 class UserServerManifest: """Thin wrapper around a jupyter server manifest.""" - def __init__(self, manifest: dict[str, Any], default_image: str, pvs_enabled: bool = True) -> None: + def __init__(self, manifest: JupyterServerV1Alpha1, default_image: str, pvs_enabled: bool = True) -> None: self.manifest = manifest self.default_image = default_image self.pvs_enabled = pvs_enabled @@ -18,12 +20,14 @@ def __init__(self, manifest: dict[str, Any], default_image: str, pvs_enabled: bo @property def name(self) -> str: """The name of the server.""" - return cast(str, self.manifest["metadata"]["name"]) + return self.manifest.metadata.name @property def image(self) -> str: """The image the server is running.""" - return cast(str, self.manifest["spec"]["jupyterServer"]["image"]) + if self.manifest.spec is None: + raise errors.ProgrammingError(message="Unexpected manifest format") + return self.manifest.spec.jupyterServer.image @property def using_default_image(self) -> bool: @@ -31,14 +35,16 @@ def using_default_image(self) -> bool: return self.image == self.default_image @property - def server_options(self) -> dict[str, Any]: + def server_options(self) -> dict[str, str | int | float]: """Extract the server options from a manifest.""" js = self.manifest - server_options = {} + if js.spec is None: + raise errors.ProgrammingError(message="Unexpected manifest format") + server_options: dict[str, str | int | float] = {} # url - server_options["defaultUrl"] = js["spec"]["jupyterServer"]["defaultUrl"] + server_options["defaultUrl"] = js.spec.jupyterServer.defaultUrl # disk - server_options["disk_request"] = js["spec"]["storage"].get("size") + server_options["disk_request"] = js.spec.storage.size # NOTE: Amalthea accepts only strings for disk request, but k8s allows bytes as number # so try to convert to number if possible with contextlib.suppress(ValueError): @@ -50,7 +56,7 @@ def server_options(self) -> dict[str, Any]: "cpu": "cpu_request", "ephemeral-storage": "ephemeral-storage", } - js_resources = js["spec"]["jupyterServer"]["resources"]["requests"] + js_resources = js.spec.jupyterServer.resources["requests"] for k8s_res_name in k8s_res_name_xref: if k8s_res_name in js_resources: server_options[k8s_res_name_xref[k8s_res_name]] = js_resources[k8s_res_name] @@ -60,8 +66,8 @@ def server_options(self) -> dict[str, Any]: server_options["ephemeral-storage"] if self.pvs_enabled else server_options["disk_request"] ) # lfs auto fetch - for patches in js["spec"]["patches"]: - for patch in patches.get("patch", []): + for patches in js.spec.patches: + for patch in cast(dict, patches.patch): if patch.get("path") == "/statefulset/spec/template/spec/initContainers/-": for env in patch.get("value", {}).get("env", []): if env.get("name") == "GIT_CLONE_LFS_AUTO_FETCH": @@ -71,12 +77,12 @@ def server_options(self) -> dict[str, Any]: @property def annotations(self) -> dict[str, str]: """Extract the manifest annotations.""" - return cast(dict[str, str], self.manifest["metadata"]["annotations"]) + return self.manifest.metadata.annotations @property def labels(self) -> dict[str, str]: """Extract the manifest labels.""" - return cast(dict[str, str], self.manifest["metadata"]["labels"]) + return self.manifest.metadata.labels @property def cloudstorage(self) -> list[ExistingCloudStorage]: @@ -86,12 +92,12 @@ def cloudstorage(self) -> list[ExistingCloudStorage]: @property def server_name(self) -> str: """Get the server name.""" - return cast(str, self.manifest["metadata"]["name"]) + return self.manifest.metadata.name @property def hibernation(self) -> Optional[dict[str, Any]]: """Return hibernation annotation.""" - hibernation = self.manifest["metadata"]["annotations"].get("hibernation") + hibernation = self.manifest.metadata.annotations.get("hibernation") return json.loads(hibernation) if hibernation else None @property @@ -120,9 +126,11 @@ def hibernation_branch(self) -> Optional[str]: @property def url(self) -> str: """Return the url where the user can access the session.""" - host = self.manifest["spec"]["routing"]["host"] - path = self.manifest["spec"]["routing"]["path"].rstrip("/") - token = self.manifest["spec"]["auth"].get("token", "") + if self.manifest.spec is None: + raise errors.ProgrammingError(message="Unexpected manifest format") + host = self.manifest.spec.routing.host + path = self.manifest.spec.routing.path.rstrip("/") + token = self.manifest.spec.auth.token or "" url = f"https://{host}{path}" if token and len(token) > 0: url += f"?token={token}" diff --git a/components/renku_data_services/notebooks/api/classes/user.py b/components/renku_data_services/notebooks/api/classes/user.py index a760557d2..0700759a1 100644 --- a/components/renku_data_services/notebooks/api/classes/user.py +++ b/components/renku_data_services/notebooks/api/classes/user.py @@ -1,112 +1,18 @@ """Notebooks user model definitions.""" -import base64 -import json -import re from functools import lru_cache -from math import floor -from typing import Any, Optional, Protocol, cast -import escapism -import jwt from gitlab import Gitlab from gitlab.v4.objects.projects import Project from gitlab.v4.objects.users import CurrentUser from sanic.log import logger -from ...errors.user import AuthenticationError +class NotebooksGitlabClient: + """Client for gitlab to be used only in the notebooks, will be eventually eliminated.""" -class User(Protocol): - """Representation of a user that is calling the API.""" - - access_token: str | None = None - git_token: str | None = None - gitlab_client: Gitlab - username: str - - @lru_cache(maxsize=8) - def get_renku_project(self, namespace_project: str) -> Optional[Project]: - """Retrieve the GitLab project.""" - try: - return self.gitlab_client.projects.get(f"{namespace_project}") - except Exception as e: - logger.warning(f"Cannot get project: {namespace_project} for user: {self.username}, error: {e}") - return None - - -class AnonymousUser(User): - """Anonymous user.""" - - auth_header = "Renku-Auth-Anon-Id" - - def __init__(self, headers: dict, gitlab_url: str): - self.authenticated = ( - self.auth_header in headers - and headers[self.auth_header] != "" - # The anonymous id must start with an alphanumeric character - and re.match(r"^[a-zA-Z0-9]", headers[self.auth_header]) is not None - ) - if not self.authenticated: - return - self.git_url = gitlab_url - self.gitlab_client = Gitlab(self.git_url, api_version="4", per_page=50) - self.username = headers[self.auth_header] - self.safe_username = escapism.escape(self.username, escape_char="-").lower() - self.full_name = None - self.email = None - self.oidc_issuer = None - self.git_token = None - self.git_token_expires_at = 0 - self.access_token = None - self.refresh_token = None - self.id = headers[self.auth_header] - - def __str__(self) -> str: - return f"" - - -class RegisteredUser(User): - """Registered user.""" - - auth_headers = [ - "Renku-Auth-Access-Token", - "Renku-Auth-Id-Token", - ] - git_header = "Renku-Auth-Git-Credentials" - - def __init__(self, headers: dict[str, str]): - self.authenticated = all([header in headers for header in self.auth_headers]) - if not self.authenticated: - return - if not headers.get(self.git_header): - raise AuthenticationError( - "Your Gitlab credentials are invalid or expired, " - "please login Renku, or fully log out and log back in." - ) - - parsed_id_token = self.parse_jwt_from_headers(headers) - self.email = parsed_id_token["email"] - self.full_name = parsed_id_token["name"] - self.username = parsed_id_token["preferred_username"] - self.safe_username = escapism.escape(self.username, escape_char="-").lower() - self.oidc_issuer = parsed_id_token["iss"] - self.id = parsed_id_token["sub"] - self.access_token = headers["Renku-Auth-Access-Token"] - self.refresh_token = headers["Renku-Auth-Refresh-Token"] - - ( - self.git_url, - self.git_auth_header, - self.git_token, - self.git_token_expires_at, - ) = self.git_creds_from_headers(headers) - self.gitlab_client = Gitlab( - self.git_url, - api_version="4", - oauth_token=self.git_token, - per_page=50, - ) + def __init__(self, url: str, gitlab_token: str | None = None): + self.gitlab_client = Gitlab(url, api_version="4", oauth_token=gitlab_token, per_page=50) @property def gitlab_user(self) -> CurrentUser | None: @@ -115,43 +21,11 @@ def gitlab_user(self) -> CurrentUser | None: self.gitlab_client.auth() return self.gitlab_client.user - @staticmethod - def parse_jwt_from_headers(headers: dict[str, str]) -> dict[str, Any]: - """Parse the JWT.""" - # No need to verify the signature because this is already done by the gateway - decoded = jwt.decode(headers["Renku-Auth-Id-Token"], options={"verify_signature": False}) - decoded = cast(dict[str, Any], decoded) - return decoded - - @staticmethod - def git_creds_from_headers(headers: dict[str, str]) -> tuple[str, str, str, int]: - """Extract the git credentials from a header.""" - parsed_dict = json.loads(base64.decodebytes(headers["Renku-Auth-Git-Credentials"].encode())) - git_url, git_credentials = next(iter(parsed_dict.items())) - if not isinstance(git_url, str) or not isinstance(git_credentials, dict): - raise AuthenticationError(message="Could not successfully decode the git credentials header") - token_match = re.match(r"^[^\s]+\ ([^\s]+)$", git_credentials["AuthorizationHeader"]) - git_token = token_match.group(1) if token_match is not None else None - if not isinstance(git_token, str): - raise AuthenticationError(message="Could not successfully decode the git credentials header") - git_token_expires_at = git_credentials.get("AccessTokenExpiresAt") - if git_token_expires_at is None: - # INFO: Indicates that the token does not expire - git_token_expires_at = -1 - else: - try: - # INFO: Sometimes this can be a float, sometimes an int - git_token_expires_at = float(git_token_expires_at) - except ValueError: - git_token_expires_at = -1 - else: - git_token_expires_at = floor(git_token_expires_at) - return ( - git_url, - git_credentials["AuthorizationHeader"], - git_token, - git_token_expires_at, - ) - - def __str__(self) -> str: - return f"" + @lru_cache(maxsize=8) + def get_renku_project(self, namespace_project: str) -> Project | None: + """Retrieve the GitLab project.""" + try: + return self.gitlab_client.projects.get(f"{namespace_project}") + except Exception as e: + logger.warning(f"Cannot find the gitlab project: {namespace_project}, error: {e}") + return None diff --git a/components/renku_data_services/notebooks/api/schemas/cloud_storage.py b/components/renku_data_services/notebooks/api/schemas/cloud_storage.py index 917092552..05b141c3c 100644 --- a/components/renku_data_services/notebooks/api/schemas/cloud_storage.py +++ b/components/renku_data_services/notebooks/api/schemas/cloud_storage.py @@ -7,8 +7,8 @@ from marshmallow import EXCLUDE, Schema, ValidationError, fields, validates_schema +from renku_data_services.base_models import APIUser from renku_data_services.notebooks.api.classes.cloud_storage import ICloudStorageRequest -from renku_data_services.notebooks.api.classes.user import User from renku_data_services.notebooks.config import _NotebooksConfig @@ -45,8 +45,8 @@ def __init__( name: Optional[str], config: _NotebooksConfig, ) -> None: + """Creates a cloud storage instance without validating the configuration.""" self.config = config - self.config.storage_validator.validate_storage_configuration(configuration, source_path) self.configuration = configuration self.source_path = source_path self.mount_folder = mount_folder @@ -54,10 +54,11 @@ def __init__( self.name = name @classmethod - def storage_from_schema( + async def storage_from_schema( cls, data: dict[str, Any], - user: User, + user: APIUser, + internal_gitlab_user: APIUser, project_id: int, work_dir: Path, config: _NotebooksConfig, @@ -76,7 +77,9 @@ def storage_from_schema( target_path, readonly, name, - ) = config.storage_validator.get_storage_by_id(user, project_id, data["storage_id"]) + ) = await config.storage_validator.get_storage_by_id( + user, internal_gitlab_user, project_id, data["storage_id"] + ) configuration = {**configuration, **(configuration or {})} readonly = readonly else: @@ -86,6 +89,7 @@ def storage_from_schema( readonly = data.get("readonly", True) mount_folder = str(work_dir / target_path) + await config.storage_validator.validate_storage_configuration(configuration, source_path) return cls(source_path, configuration, readonly, mount_folder, name, config) def get_manifest_patch( diff --git a/components/renku_data_services/notebooks/api/schemas/server_options.py b/components/renku_data_services/notebooks/api/schemas/server_options.py index c62846ef8..1b421c378 100644 --- a/components/renku_data_services/notebooks/api/schemas/server_options.py +++ b/components/renku_data_services/notebooks/api/schemas/server_options.py @@ -6,6 +6,7 @@ from marshmallow import Schema, fields +from renku_data_services.crc.models import ResourceClass from renku_data_services.notebooks.api.schemas.custom_fields import ByteSizeField, CpuField, GpuField from renku_data_services.notebooks.config.dynamic import CPUEnforcement from renku_data_services.notebooks.errors.programming import ProgrammingError @@ -176,19 +177,22 @@ def to_k8s_resources(self, enforce_cpu_limits: CPUEnforcement = CPUEnforcement.O return resources @classmethod - def from_resource_class(cls, data: dict[str, Any]) -> Self: + def from_resource_class(cls, data: ResourceClass) -> Self: """Convert a CRC resource class to server options. Data Service uses GB for storage and memory whereas the notebook service uses bytes so we convert to bytes here. """ return cls( - cpu=data["cpu"], - memory=data["memory"] * 1000000000, - gpu=data["gpu"], - storage=data["default_storage"] * 1000000000, - node_affinities=[NodeAffinity(**a) for a in data.get("node_affinities", [])], - tolerations=[Toleration(t) for t in data.get("tolerations", [])], - resource_class_id=data.get("id"), + cpu=data.cpu, + memory=data.memory * 1_000_000_000, + gpu=data.gpu, + storage=data.default_storage * 1_000_000_000, + node_affinities=[ + NodeAffinity(key=a.key, required_during_scheduling=a.required_during_scheduling) + for a in data.node_affinities + ], + tolerations=[Toleration(t) for t in data.tolerations], + resource_class_id=data.id, ) @classmethod diff --git a/components/renku_data_services/notebooks/api/schemas/servers_get.py b/components/renku_data_services/notebooks/api/schemas/servers_get.py index e1ca99d94..cbe43b3f9 100644 --- a/components/renku_data_services/notebooks/api/schemas/servers_get.py +++ b/components/renku_data_services/notebooks/api/schemas/servers_get.py @@ -124,7 +124,7 @@ class Meta: unknown = EXCLUDE - annotations = fields.Nested(_ServersGetEndpointAnnotations().schema()) + annotations = fields.Nested(_ServersGetEndpointAnnotations) name = fields.Str() state = fields.Dict() started = fields.DateTime(format="iso", allow_none=True) @@ -250,8 +250,8 @@ def get_unschedulable_message(pod: dict[str, Any]) -> str | None: def get_all_container_statuses(server: UserServerManifest) -> list[dict[str, Any]]: return cast( list[dict[str, Any]], - server.manifest["status"].get("mainPod", {}).get("status", {}).get("containerStatuses", []) - + server.manifest["status"].get("mainPod", {}).get("status", {}).get("initContainerStatuses", []), + server.manifest.status.get("mainPod", {}).get("status", {}).get("containerStatuses", []) + + server.manifest.status.get("mainPod", {}).get("status", {}).get("initContainerStatuses", []), ) def get_failed_containers(container_statuses: list[dict[str, Any]]) -> list[dict[str, Any]]: @@ -275,17 +275,17 @@ def get_starting_message(step_summary: list[dict[str, Any]]) -> str | None: def is_user_anonymous(server: UserServerManifest, prefix: str = "renku.io/") -> bool: js = server.manifest - annotations = js.get("metadata", {}).get("annotations", {}) + annotations = js.metadata.annotations return ( - str(annotations.get(f"{prefix}userId", "")).startswith("anon-") - and str(annotations.get(f"{prefix}username", "")).startswith("anon-") - and str(js.get("metadata", {}).get("name", "")).startswith("anon-") + annotations.get(f"{prefix}userId", "").startswith("anon-") + and annotations.get(f"{prefix}username", "").startswith("anon-") + and js.metadata.name.startswith("anon-") ) def get_status_breakdown(server: UserServerManifest) -> list[dict[str, Any]]: js = server.manifest - init_container_summary = js.get("status", {}).get("containerStates", {}).get("init", {}) - container_summary = js.get("status", {}).get("containerStates", {}).get("regular", {}) + init_container_summary = js.status.get("containerStates", {}).get("init", {}) + container_summary = js.status.get("containerStates", {}).get("regular", {}) output = [] init_container_name_desc_xref = OrderedDict( [ @@ -303,7 +303,7 @@ def get_status_breakdown(server: UserServerManifest) -> list[dict[str, Any]]: ("jupyter-server", "Starting session"), ] ) - current_state = js.get("status", {}).get("state") + current_state = js.status.get("state") if current_state is None or current_state == ServerStatusEnum.Starting.value: # NOTE: This means that the server is starting and the statuses are not populated # yet, therefore in this case we will use defaults and set all statuses to waiting @@ -341,16 +341,16 @@ def get_status_breakdown(server: UserServerManifest) -> list[dict[str, Any]]: def get_status(server: UserServerManifest, started: datetime) -> dict[str, dict[str, Any]]: """Get the status of the jupyterserver.""" - state = server.manifest.get("status", {}).get("state", ServerStatusEnum.Starting.value) + state = server.manifest.status.get("state", ServerStatusEnum.Starting.value) output = { "state": state, } container_statuses = get_all_container_statuses(server) if state == ServerStatusEnum.Failed.value: failed_container_statuses = get_failed_containers(container_statuses) - unschedulable_msg = get_unschedulable_message(server.manifest.get("status", {}).get("mainPod", {})) + unschedulable_msg = get_unschedulable_message(server.manifest.status.get("mainPod", {})) event_based_messages = [] - events = server.manifest.get("status", {}).get("events", {}) + events = server.manifest.status.get("events", {}) for component in sorted(events.keys()): message = events.get(component, {}).get("message") if message is None: @@ -376,11 +376,12 @@ def get_status(server: UserServerManifest, started: datetime) -> dict[str, dict[ output["warnings"].append({"message": "Server was started using the default image."}) now = datetime.now(UTC) - annotations = server.manifest.get("metadata", {}).get("annotations", {}) + annotations = server.manifest.metadata.annotations last_activity_date_str = annotations.get("renku.io/lastActivityDate") - idle_threshold = server.manifest.get("spec", {}).get("culling", {}).get("idleSecondsThreshold", 0) + assert server.manifest.spec is not None + idle_threshold = server.manifest.spec.culling.idleSecondsThreshold critical: bool = False if idle_threshold > 0 and last_activity_date_str: @@ -401,9 +402,7 @@ def get_status(server: UserServerManifest, started: datetime) -> dict[str, dict[ hibernation_date_str = annotations.get("renku.io/hibernationDate") - hibernated_seconds_threshold = ( - server.manifest.get("spec", {}).get("culling", {}).get("hibernatedSecondsThreshold", 0) - ) + hibernated_seconds_threshold = server.manifest.spec.culling.hibernatedSecondsThreshold if hibernation_date_str and hibernated_seconds_threshold > 0 and not is_user_anonymous(server): hibernation_date = datetime.fromisoformat(hibernation_date_str) @@ -421,7 +420,7 @@ def get_status(server: UserServerManifest, started: datetime) -> dict[str, dict[ } ) - max_age_threshold = server.manifest.get("spec", {}).get("culling", {}).get("maxAgeSecondsThreshold", 0) + max_age_threshold = server.manifest.spec.culling.maxAgeSecondsThreshold age = (datetime.now(UTC) - started).total_seconds() remaining_session_time = max_age_threshold - age @@ -464,7 +463,7 @@ def get_resource_requests(server: UserServerManifest) -> dict[str, Any]: def get_resource_usage( server: UserServerManifest, ) -> dict[str, Union[str, int]]: - usage = server.manifest.get("status", {}).get("mainPod", {}).get("resourceUsage", {}) + usage = server.manifest.status.get("mainPod", {}).get("resourceUsage", {}) formatted_output = {} if "cpuMillicores" in usage: formatted_output["cpu"] = usage["cpuMillicores"] / 1000 @@ -474,7 +473,8 @@ def get_resource_usage( formatted_output["storage"] = usage["disk"]["usedBytes"] return formatted_output - started = datetime.fromisoformat(re.sub(r"Z$", "+00:00", server.manifest["metadata"]["creationTimestamp"])) + assert server.manifest.metadata.creationTimestamp is not None + started = server.manifest.metadata.creationTimestamp output = { "annotations": config.session_get_endpoint_annotations.sanitize_dict( @@ -486,7 +486,7 @@ def get_resource_usage( } ), "name": server.name, - "state": {"pod_name": server.manifest["status"].get("mainPod", {}).get("name")}, + "state": {"pod_name": server.manifest.status.get("mainPod", {}).get("name")}, "started": started, "status": get_status(server, started), "url": server.url, diff --git a/components/renku_data_services/notebooks/apispec.py b/components/renku_data_services/notebooks/apispec.py index 59f611355..acd59804c 100644 --- a/components/renku_data_services/notebooks/apispec.py +++ b/components/renku_data_services/notebooks/apispec.py @@ -1,6 +1,6 @@ # generated by datamodel-codegen: # filename: api.spec.yaml -# timestamp: 2024-08-28T09:26:11+00:00 +# timestamp: 2024-09-23T08:31:51+00:00 from __future__ import annotations @@ -34,16 +34,10 @@ class DefaultCullingThresholds(BaseAPISpec): registered: CullingThreshold -class Error(BaseAPISpec): - code: int = Field(..., example=1404, gt=0) - detail: Optional[str] = Field( - None, example="A more detailed optional message showing what the problem was" - ) - message: str = Field(..., example="Something went wrong - please try again later") - - -class ErrorResponse(BaseAPISpec): - error: Error +class ErrorResponseNested(BaseAPISpec): + code: int + detail: Optional[str] = None + message: str class Generated(BaseAPISpec): @@ -156,7 +150,7 @@ class StringServerOptionsChoice(BaseAPISpec): class UserPodResources(BaseAPISpec): - requests: ResourceRequests + requests: Optional[ResourceRequests] = None usage: Optional[ResourceUsage] = None @@ -244,11 +238,13 @@ class SessionStatus(BaseAPISpec): class SessionResourcesRequests(BaseAPISpec): - cpu: float = Field(..., description="Fractional CPUs") - gpu: int = Field(0, description="Number of GPUs used") - memory: int = Field(..., description="Ammount of RAM for the session, in gigabytes") - storage: int = Field( - ..., description="The size of disk storage for the session, in gigabytes" + cpu: Optional[float] = Field(None, description="Fractional CPUs") + gpu: Optional[int] = Field(None, description="Number of GPUs used") + memory: Optional[int] = Field( + None, description="Ammount of RAM for the session, in gigabytes" + ) + storage: Optional[int] = Field( + None, description="The size of disk storage for the session, in gigabytes" ) @@ -297,10 +293,14 @@ class SessionsImagesGetParametersQuery(BaseAPISpec): image_url: str +class ErrorResponse(BaseAPISpec): + error: ErrorResponseNested + + class LaunchNotebookRequest(BaseAPISpec): project_id: str launcher_id: str - image: str + image: Optional[str] = None repositories: List[LaunchNotebookRequestRepository] = [] cloudstorage: List[RCloneStorageRequest] = [] storage: int = 1 @@ -349,7 +349,13 @@ class NotebookResponse(BaseAPISpec): annotations: Optional[FieldUserPodAnnotations] = None cloudstorage: Optional[List[LaunchNotebookResponseCloudStorage]] = None image: Optional[str] = None - name: Optional[str] = None + name: Optional[str] = Field( + None, + example="d185e68d-d43-renku-2-b9ac279a4e8a85ac28d08", + max_length=50, + min_length=5, + pattern="^[a-z]([-a-z0-9]*[a-z0-9])?$", + ) resources: Optional[UserPodResources] = None started: Optional[datetime] = None state: Optional[Dict[str, Any]] = None @@ -378,7 +384,13 @@ class SessionPostRequest(BaseAPISpec): class SessionResponse(BaseAPISpec): image: str - name: str + name: str = Field( + ..., + example="d185e68d-d43-renku-2-b9ac279a4e8a85ac28d08", + max_length=50, + min_length=5, + pattern="^[a-z]([-a-z0-9]*[a-z0-9])?$", + ) resources: SessionResources started: Optional[datetime] = Field(...) status: SessionStatus diff --git a/components/renku_data_services/notebooks/blueprints.py b/components/renku_data_services/notebooks/blueprints.py index c38df8347..79059e6cf 100644 --- a/components/renku_data_services/notebooks/blueprints.py +++ b/components/renku_data_services/notebooks/blueprints.py @@ -1,55 +1,98 @@ """Notebooks service API.""" +import base64 import json as json_lib +import logging +import os from dataclasses import dataclass from datetime import UTC, datetime +from math import floor from pathlib import Path from typing import Any +from urllib.parse import urljoin, urlparse import requests from gitlab.const import Visibility as GitlabVisibility from gitlab.v4.objects.projects import Project as GitlabProject +from kubernetes.client import V1ObjectMeta, V1Secret from marshmallow import ValidationError -from sanic import Request, json +from sanic import Request, empty, json from sanic.log import logger from sanic.response import HTTPResponse, JSONResponse from sanic_ext import validate +from toml import dumps +from ulid import ULID +from yaml import safe_dump +from renku_data_services import base_models +from renku_data_services.base_api.auth import authenticate, authenticate_2 from renku_data_services.base_api.blueprint import BlueprintFactoryResponse, CustomBlueprint +from renku_data_services.base_models import AnonymousAPIUser, APIUser, AuthenticatedAPIUser, Authenticator +from renku_data_services.crc.db import ResourcePoolRepository from renku_data_services.errors import errors from renku_data_services.notebooks import apispec +from renku_data_services.notebooks.api.amalthea_patches import git_proxy, init_containers from renku_data_services.notebooks.api.classes.auth import GitlabToken, RenkuTokens from renku_data_services.notebooks.api.classes.image import Image from renku_data_services.notebooks.api.classes.repository import Repository from renku_data_services.notebooks.api.classes.server import Renku1UserServer, Renku2UserServer, UserServer from renku_data_services.notebooks.api.classes.server_manifest import UserServerManifest -from renku_data_services.notebooks.api.classes.user import AnonymousUser, RegisteredUser +from renku_data_services.notebooks.api.classes.user import NotebooksGitlabClient from renku_data_services.notebooks.api.schemas.cloud_storage import RCloneStorage from renku_data_services.notebooks.api.schemas.config_server_options import ServerOptionsEndpointResponse from renku_data_services.notebooks.api.schemas.logs import ServerLogs from renku_data_services.notebooks.api.schemas.secrets import K8sUserSecrets from renku_data_services.notebooks.api.schemas.server_options import ServerOptions -from renku_data_services.notebooks.api.schemas.servers_get import NotebookResponse, ServersGetResponse +from renku_data_services.notebooks.api.schemas.servers_get import ( + NotebookResponse, + ServersGetResponse, +) from renku_data_services.notebooks.api.schemas.servers_patch import PatchServerStatusEnum from renku_data_services.notebooks.config import _NotebooksConfig +from renku_data_services.notebooks.crs import ( + AmaltheaSessionSpec, + AmaltheaSessionV1Alpha1, + Authentication, + AuthenticationType, + Culling, + ExtraContainer, + ExtraVolume, + ExtraVolumeMount, + Ingress, + InitContainer, + Metadata, + Resources, + SecretAsVolume, + SecretAsVolumeItem, + SecretRef, + Session, + SessionEnvItem, + State, + Storage, + TlsSecret, +) from renku_data_services.notebooks.errors.intermittent import AnonymousUserPatchError, PVDisabledError from renku_data_services.notebooks.errors.programming import ProgrammingError from renku_data_services.notebooks.errors.user import MissingResourceError, UserInputError -from renku_data_services.notebooks.util.authn import NotebooksAuthenticator, notebooks_authenticate from renku_data_services.notebooks.util.kubernetes_ import ( find_container, renku_1_make_server_name, renku_2_make_server_name, ) from renku_data_services.notebooks.util.repository import get_status +from renku_data_services.project.db import ProjectRepository +from renku_data_services.repositories.db import GitRepositoriesRepository +from renku_data_services.session.db import SessionRepository @dataclass(kw_only=True) class NotebooksBP(CustomBlueprint): """Handlers for manipulating notebooks.""" - authenticator: NotebooksAuthenticator + authenticator: Authenticator nb_config: _NotebooksConfig + git_repo: GitRepositoriesRepository + internal_gitlab_authenticator: base_models.Authenticator def version(self) -> BlueprintFactoryResponse: """Return notebook services version.""" @@ -82,18 +125,18 @@ async def _version(_: Request) -> JSONResponse: } return json(info) - return "/version", ["GET"], _version + return "/notebooks/version", ["GET"], _version def user_servers(self) -> BlueprintFactoryResponse: """Return a JSON of running servers for the user.""" - @notebooks_authenticate(self.authenticator) + @authenticate(self.authenticator) async def _user_servers( - request: Request, user: AnonymousUser | RegisteredUser, **query_params: dict + request: Request, user: AnonymousAPIUser | AuthenticatedAPIUser, **query_params: dict ) -> JSONResponse: servers = [ UserServerManifest(s, self.nb_config.sessions.default_image) - for s in self.nb_config.k8s_client.list_servers(user.safe_username) + for s in await self.nb_config.k8s_client.list_servers(user.id) ] filter_attrs = list(filter(lambda x: x[1] is not None, request.get_query_args())) filtered_servers = {} @@ -108,11 +151,11 @@ async def _user_servers( def user_server(self) -> BlueprintFactoryResponse: """Returns a user server based on its ID.""" - @notebooks_authenticate(self.authenticator) + @authenticate(self.authenticator) async def _user_server( - request: Request, user: RegisteredUser | AnonymousUser, server_name: str + request: Request, user: AnonymousAPIUser | AuthenticatedAPIUser, server_name: str ) -> JSONResponse: - server = self.nb_config.k8s_client.get_server(server_name, user.safe_username) + server = await self.nb_config.k8s_client.get_server(server_name, user.id) if server is None: raise MissingResourceError(message=f"The server {server_name} does not exist.") server = UserServerManifest(server, self.nb_config.sessions.default_image) @@ -121,23 +164,26 @@ async def _user_server( return "/notebooks/servers/", ["GET"], _user_server def launch_notebook(self) -> BlueprintFactoryResponse: - """Start a renku session using the old operator in renku v2.""" + """Start a renku session.""" - @notebooks_authenticate(self.authenticator) + @authenticate_2(self.authenticator, self.internal_gitlab_authenticator) @validate(json=apispec.LaunchNotebookRequest) async def _launch_notebook( - request: Request, user: RegisteredUser | AnonymousUser, body: apispec.LaunchNotebookRequest + request: Request, + user: AnonymousAPIUser | AuthenticatedAPIUser, + internal_gitlab_user: APIUser, + body: apispec.LaunchNotebookRequest, ) -> JSONResponse: server_name = renku_2_make_server_name( - safe_username=user.safe_username, project_id=body.project_id, launcher_id=body.launcher_id + safe_username=user.id, project_id=body.project_id, launcher_id=body.launcher_id ) server_class = Renku2UserServer - server, status_code = self.launch_notebook_helper( + server, status_code = await self.launch_notebook_helper( nb_config=self.nb_config, server_name=server_name, server_class=server_class, user=user, - image=body.image, + image=body.image or self.nb_config.sessions.default_image, resource_class_id=body.resource_class_id, storage=body.storage, environment_variables=body.environment_variables, @@ -156,24 +202,27 @@ async def _launch_notebook( project_id=body.project_id, launcher_id=body.launcher_id, repositories=body.repositories, + internal_gitlab_user=internal_gitlab_user, ) return json(NotebookResponse().dump(server), status_code) return "/notebooks/servers", ["POST"], _launch_notebook def launch_notebook_old(self) -> BlueprintFactoryResponse: - """Start a renku session using the old operator renku v1.""" + """Start a renku session using the old operator.""" - @notebooks_authenticate(self.authenticator) + @authenticate_2(self.authenticator, self.internal_gitlab_authenticator) @validate(json=apispec.LaunchNotebookRequestOld) async def _launch_notebook_old( - request: Request, user: RegisteredUser | AnonymousUser, body: apispec.LaunchNotebookRequestOld + request: Request, + user: AnonymousAPIUser | AuthenticatedAPIUser, + internal_gitlab_user: APIUser, + body: apispec.LaunchNotebookRequestOld, ) -> JSONResponse: - server_name = renku_1_make_server_name( - user.safe_username, body.namespace, body.project, body.branch, body.commit_sha - ) + server_name = renku_1_make_server_name(user.id, body.namespace, body.project, body.branch, body.commit_sha) project_slug = f"{body.namespace}/{body.project}" - gl_project = user.get_renku_project(project_slug) + gitlab_client = NotebooksGitlabClient(self.nb_config.git.url, APIUser.access_token) + gl_project = gitlab_client.get_renku_project(project_slug) if gl_project is None: raise errors.MissingResourceError(message=f"Cannot find gitlab project with slug {project_slug}") gl_project_path = gl_project.path @@ -188,7 +237,7 @@ async def _launch_notebook_old( else None ) - server, status_code = self.launch_notebook_helper( + server, status_code = await self.launch_notebook_helper( nb_config=self.nb_config, server_name=server_name, server_class=server_class, @@ -212,17 +261,18 @@ async def _launch_notebook_old( project_id=None, launcher_id=None, repositories=None, + internal_gitlab_user=internal_gitlab_user, ) return json(NotebookResponse().dump(server), status_code) return "/notebooks/old/servers", ["POST"], _launch_notebook_old @staticmethod - def launch_notebook_helper( + async def launch_notebook_helper( nb_config: _NotebooksConfig, server_name: str, server_class: type[UserServer], - user: AnonymousUser | RegisteredUser, + user: AnonymousAPIUser | AuthenticatedAPIUser, image: str, resource_class_id: int | None, storage: int | None, @@ -242,9 +292,10 @@ def launch_notebook_helper( project_id: str | None, # Renku 2.0 launcher_id: str | None, # Renku 2.0 repositories: list[apispec.LaunchNotebookRequestRepository] | None, # Renku 2.0 + internal_gitlab_user: APIUser, ) -> tuple[UserServerManifest, int]: """Helper function to launch a Jupyter server.""" - server = nb_config.k8s_client.get_server(server_name, user.safe_username) + server = await nb_config.k8s_client.get_server(server_name, user.id) if server: return UserServerManifest( @@ -262,8 +313,12 @@ def launch_notebook_helper( image_repo = parsed_image.repo_api() image_exists_publicly = image_repo.image_exists(parsed_image) image_exists_privately = False - if not image_exists_publicly and parsed_image.hostname == nb_config.git.registry and user.git_token: - image_repo = image_repo.with_oauth2_token(user.git_token) + if ( + not image_exists_publicly + and parsed_image.hostname == nb_config.git.registry + and internal_gitlab_user.access_token + ): + image_repo = image_repo.with_oauth2_token(internal_gitlab_user.access_token) image_exists_privately = image_repo.image_exists(parsed_image) if not image_exists_privately and not image_exists_publicly: using_default_image = True @@ -288,8 +343,8 @@ def launch_notebook_helper( # non-authenticated users. Also, a nice footgun from the Gitlab API Python library. is_image_private = getattr(gl_project, "visibility", GitlabVisibility.PUBLIC) != GitlabVisibility.PUBLIC image_repo = parsed_image.repo_api() - if is_image_private and user.git_token: - image_repo = image_repo.with_oauth2_token(user.git_token) + if is_image_private and internal_gitlab_user.access_token: + image_repo = image_repo.with_oauth2_token(internal_gitlab_user.access_token) if not image_repo.image_exists(parsed_image): raise MissingResourceError( message=( @@ -303,7 +358,9 @@ def launch_notebook_helper( parsed_server_options: ServerOptions | None = None if resource_class_id is not None: # A resource class ID was passed in, validate with CRC service - parsed_server_options = nb_config.crc_validator.validate_class_storage(user, resource_class_id, storage) + parsed_server_options = await nb_config.crc_validator.validate_class_storage( + user, resource_class_id, storage + ) elif server_options is not None: if isinstance(server_options, dict): requested_server_options = ServerOptions( @@ -322,7 +379,7 @@ def launch_notebook_helper( f"launching sessions: {type(server_options)}" ) # The old style API was used, try to find a matching class from the CRC service - parsed_server_options = nb_config.crc_validator.find_acceptable_class(user, requested_server_options) + parsed_server_options = await nb_config.crc_validator.find_acceptable_class(user, requested_server_options) if parsed_server_options is None: raise UserInputError( message="Cannot find suitable server options based on your request and " @@ -333,15 +390,15 @@ def launch_notebook_helper( ) else: # No resource class ID specified or old-style server options, use defaults from CRC - default_resource_class = nb_config.crc_validator.get_default_class() - max_storage_gb = default_resource_class.get("max_storage", 0) + default_resource_class = await nb_config.crc_validator.get_default_class() + max_storage_gb = default_resource_class.max_storage if storage is not None and storage > max_storage_gb: raise UserInputError( "The requested storage amount is higher than the " f"allowable maximum for the default resource class of {max_storage_gb}GB." ) if storage is None: - storage = default_resource_class.get("default_storage") or 1 + storage = default_resource_class.default_storage parsed_server_options = ServerOptions.from_resource_class(default_resource_class) # Storage in request is in GB parsed_server_options.set_storage(storage, gigabytes=True) @@ -363,12 +420,13 @@ def launch_notebook_helper( try: for cstorage in cloudstorage: storages.append( - RCloneStorage.storage_from_schema( + await RCloneStorage.storage_from_schema( cstorage.model_dump(), user=user, project_id=gl_project_id, work_dir=server_work_dir.absolute(), config=nb_config, + internal_gitlab_user=internal_gitlab_user, ) ) except ValidationError as e: @@ -421,18 +479,18 @@ def launch_notebook_helper( detail="This can occur if your username has been changed manually or by an admin.", ) - manifest = server.start() + manifest = await server.start() if manifest is None: raise errors.ProgrammingError(message="Failed to start server.") - logger.debug(f"Server {server.server_name} has been started") + logging.debug(f"Server {server.server_name} has been started") if k8s_user_secret is not None: owner_reference = { "apiVersion": "amalthea.dev/v1alpha1", "kind": "JupyterServer", "name": server.server_name, - "uid": manifest["metadata"]["uid"], + "uid": manifest.metadata.uid, "controller": True, } request_data = { @@ -443,8 +501,8 @@ def launch_notebook_helper( } headers = {"Authorization": f"bearer {user.access_token}"} - def _on_error(server_name: str, error_msg: str) -> None: - nb_config.k8s_client.delete_server(server_name, forced=True, safe_username=user.safe_username) + async def _on_error(server_name: str, error_msg: str) -> None: + await nb_config.k8s_client.delete_server(server_name, safe_username=user.id) raise RuntimeError(error_msg) try: @@ -455,42 +513,48 @@ def _on_error(server_name: str, error_msg: str) -> None: timeout=10, ) except requests.exceptions.ConnectionError: - _on_error(server.server_name, "User secrets storage service could not be contacted {exc}") + await _on_error(server.server_name, "User secrets storage service could not be contacted {exc}") if response.status_code != 201: - _on_error(server.server_name, f"User secret could not be created {response.json()}") + await _on_error(server.server_name, f"User secret could not be created {response.json()}") return UserServerManifest(manifest, nb_config.sessions.default_image), 201 def patch_server(self) -> BlueprintFactoryResponse: """Patch a user server by name based on the query param.""" - @notebooks_authenticate(self.authenticator) + @authenticate_2(self.authenticator, self.internal_gitlab_authenticator) @validate(json=apispec.PatchServerRequest) async def _patch_server( - request: Request, user: RegisteredUser | AnonymousUser, server_name: str, body: apispec.PatchServerRequest + request: Request, + user: AnonymousAPIUser | AuthenticatedAPIUser, + internal_gitlab_user: APIUser, + server_name: str, + body: apispec.PatchServerRequest, ) -> JSONResponse: if not self.nb_config.sessions.storage.pvs_enabled: raise PVDisabledError() - if isinstance(user, AnonymousUser): + if isinstance(user, AnonymousAPIUser): raise AnonymousUserPatchError() patch_body = body - server = self.nb_config.k8s_client.get_server(server_name, user.safe_username) + server = await self.nb_config.k8s_client.get_server(server_name, user.id) if server is None: raise errors.MissingResourceError(message=f"The server with name {server_name} cannot be found") + if server.spec is None: + raise errors.ProgrammingError(message="The server manifest is absent") new_server = server - currently_hibernated = server.get("spec", {}).get("jupyterServer", {}).get("hibernated", False) - currently_failing = server.get("status", {}).get("state", "running") == "failed" + currently_hibernated = server.spec.jupyterServer.hibernated + currently_failing = server.status.get("state", "running") == "failed" state = PatchServerStatusEnum.from_api_state(body.state) if body.state is not None else None resource_class_id = patch_body.resource_class_id if server and not (currently_hibernated or currently_failing) and resource_class_id: raise UserInputError("The resource class can be changed only if the server is hibernated or failing") if resource_class_id: - parsed_server_options = self.nb_config.crc_validator.validate_class_storage( + parsed_server_options = await self.nb_config.crc_validator.validate_class_storage( user, resource_class_id, storage=None, # we do not care about validating storage @@ -517,7 +581,7 @@ async def _patch_server( "value": parsed_server_options.priority_class, } ) - elif server.get("metadata", {}).get("labels", {}).get("renku.io/quota"): + elif server.metadata.labels.get("renku.io/quota"): js_patch.append( { "op": "remove", @@ -525,8 +589,8 @@ async def _patch_server( "path": "/metadata/labels/renku.io~1quota", } ) - new_server = self.nb_config.k8s_client.patch_server( - server_name=server_name, safe_username=user.safe_username, patch=js_patch + new_server = await self.nb_config.k8s_client.patch_server( + server_name=server_name, safe_username=user.id, patch=js_patch ) ss_patch: list[dict[str, Any]] = [ { @@ -535,11 +599,11 @@ async def _patch_server( "value": parsed_server_options.priority_class, } ] - self.nb_config.k8s_client.patch_statefulset(server_name=server_name, patch=ss_patch) + await self.nb_config.k8s_client.patch_statefulset(server_name=server_name, patch=ss_patch) if state == PatchServerStatusEnum.Hibernated: # NOTE: Do nothing if server is already hibernated - currently_hibernated = server.get("spec", {}).get("jupyterServer", {}).get("hibernated", False) + currently_hibernated = server.spec.jupyterServer.hibernated if server and currently_hibernated: logger.warning(f"Server {server_name} is already hibernated.") @@ -549,7 +613,7 @@ async def _patch_server( hibernation: dict[str, str | bool] = {"branch": "", "commit": "", "dirty": "", "synchronized": ""} - sidecar_patch = find_container(server.get("spec", {}).get("patches", []), "git-sidecar") + sidecar_patch = find_container(server.spec.patches, "git-sidecar") status = ( get_status( server_name=server_name, @@ -587,8 +651,8 @@ async def _patch_server( }, } - new_server = self.nb_config.k8s_client.patch_server( - server_name=server_name, safe_username=user.safe_username, patch=patch + new_server = await self.nb_config.k8s_client.patch_server( + server_name=server_name, safe_username=user.id, patch=patch ) elif state == PatchServerStatusEnum.Running: # NOTE: We clear hibernation annotations in Amalthea to avoid flickering in the UI (showing @@ -602,35 +666,41 @@ async def _patch_server( } # NOTE: The tokens in the session could expire if the session is hibernated long enough, # here we inject new ones to make sure everything is valid when the session starts back up. - if user.access_token is None or user.refresh_token is None or user.git_token is None: + if user.access_token is None or user.refresh_token is None or internal_gitlab_user.access_token is None: raise errors.UnauthorizedError( message="Cannot patch the server if the user is not fully logged in." ) renku_tokens = RenkuTokens(access_token=user.access_token, refresh_token=user.refresh_token) - gitlab_token = GitlabToken(access_token=user.git_token, expires_at=user.git_token_expires_at) - self.nb_config.k8s_client.patch_tokens(server_name, renku_tokens, gitlab_token) - new_server = self.nb_config.k8s_client.patch_server( - server_name=server_name, safe_username=user.safe_username, patch=patch + gitlab_token = GitlabToken( + access_token=internal_gitlab_user.access_token, + expires_at=( + floor(user.access_token_expires_at.timestamp()) + if user.access_token_expires_at is not None + else -1 + ), + ) + await self.nb_config.k8s_client.patch_tokens(server_name, renku_tokens, gitlab_token) + new_server = await self.nb_config.k8s_client.patch_server( + server_name=server_name, safe_username=user.id, patch=patch ) return json( NotebookResponse().dump(UserServerManifest(new_server, self.nb_config.sessions.default_image)), 200 ) - return "/notebooks/servers", ["POST"], _patch_server + return "/notebooks/servers/", ["PATCH"], _patch_server def stop_server(self) -> BlueprintFactoryResponse: """Stop user server by name.""" - @notebooks_authenticate(self.authenticator) + @authenticate(self.authenticator) async def _stop_server( - request: Request, user: RegisteredUser | AnonymousUser, server_name: str + request: Request, user: AnonymousAPIUser | AuthenticatedAPIUser, server_name: str ) -> HTTPResponse: - forced: bool = request.query_args.get("forced") == "true" - self.nb_config.k8s_client.delete_server(server_name, forced=forced, safe_username=user.safe_username) + await self.nb_config.k8s_client.delete_server(server_name, safe_username=user.id) return HTTPResponse(status=204) - return "/notebooks/servers", ["DELETE"], _stop_server + return "/notebooks/servers/", ["DELETE"], _stop_server def server_options(self) -> BlueprintFactoryResponse: """Return a set of configurable server options.""" @@ -652,15 +722,15 @@ async def _server_options(request: Request) -> JSONResponse: def server_logs(self) -> BlueprintFactoryResponse: """Return the logs of the running server.""" - @notebooks_authenticate(self.authenticator) + @authenticate(self.authenticator) async def _server_logs( - request: Request, user: RegisteredUser | AnonymousUser, server_name: str + request: Request, user: AnonymousAPIUser | AuthenticatedAPIUser, server_name: str ) -> JSONResponse: max_lines = int(request.query_args.get("max_lines", 250)) logs = self.nb_config.k8s_client.get_server_logs( server_name=server_name, max_log_lines=max_lines, - safe_username=user.safe_username, + safe_username=user.id, ) return json(ServerLogs().dump(logs)) @@ -669,18 +739,334 @@ async def _server_logs( def check_docker_image(self) -> BlueprintFactoryResponse: """Return the availability of the docker image.""" - @notebooks_authenticate(self.authenticator) - async def _check_docker_image(request: Request, user: RegisteredUser | AnonymousUser) -> HTTPResponse: + @authenticate_2(self.authenticator, self.internal_gitlab_authenticator) + async def _check_docker_image( + request: Request, user: AnonymousAPIUser | AuthenticatedAPIUser, internal_gitlab_user: APIUser + ) -> HTTPResponse: image_url = request.query_args.get("image_url") if not isinstance(image_url, str): raise ValueError("required string of image url") parsed_image = Image.from_path(image_url) image_repo = parsed_image.repo_api() - if parsed_image.hostname == self.nb_config.git.registry and user.git_token: - image_repo = image_repo.with_oauth2_token(user.git_token) + if parsed_image.hostname == self.nb_config.git.registry and internal_gitlab_user.access_token: + image_repo = image_repo.with_oauth2_token(internal_gitlab_user.access_token) if image_repo.image_exists(parsed_image): return HTTPResponse(status=200) else: return HTTPResponse(status=404) return "/notebooks/images", ["GET"], _check_docker_image + + +@dataclass(kw_only=True) +class NotebooksNewBP(CustomBlueprint): + """Handlers for manipulating notebooks for the new Amalthea operator.""" + + authenticator: base_models.Authenticator + internal_gitlab_authenticator: base_models.Authenticator + nb_config: _NotebooksConfig + project_repo: ProjectRepository + session_repo: SessionRepository + rp_repo: ResourcePoolRepository + + def start(self) -> BlueprintFactoryResponse: + """Start a session with the new operator.""" + + @authenticate_2(self.authenticator, self.internal_gitlab_authenticator) + @validate(json=apispec.SessionPostRequest) + async def _handler( + _: Request, + user: AuthenticatedAPIUser | AnonymousAPIUser, + internal_gitlab_user: APIUser, + body: apispec.SessionPostRequest, + ) -> JSONResponse: + # gitlab_client = NotebooksGitlabClient(self.nb_config.git.url, internal_gitlab_user.access_token) + launcher = await self.session_repo.get_launcher(user, ULID.from_str(body.launcher_id)) + project = await self.project_repo.get_project(user=user, project_id=launcher.project_id) + server_name = renku_2_make_server_name( + safe_username=user.id, project_id=str(launcher.project_id), launcher_id=body.launcher_id + ) + existing_session = await self.nb_config.k8s_v2_client.get_server(server_name, user.id) + if existing_session is not None and existing_session.spec is not None: + return json(existing_session.as_apispec().model_dump(exclude_none=True, mode="json")) + environment = launcher.environment + image = environment.container_image + default_resource_class = await self.rp_repo.get_default_resource_class() + if default_resource_class.id is None: + raise errors.ProgrammingError(message="The default reosurce class has to have an ID", quiet=True) + resource_class_id = body.resource_class_id or default_resource_class.id + parsed_server_options = await self.nb_config.crc_validator.validate_class_storage( + user, resource_class_id, body.disk_storage + ) + work_dir = Path("/home/jovyan/work") + user_secrets: K8sUserSecrets | None = None + # if body.user_secrets: + # user_secrets = K8sUserSecrets( + # name=server_name, + # user_secret_ids=body.user_secrets.user_secret_ids, + # mount_path=body.user_secrets.mount_path, + # ) + cloud_storage: list[RCloneStorage] = [] + # repositories = [Repository(i.url, branch=i.branch, commit_sha=i.commit_sha) for i in body.repositories] + repositories = [Repository(url=i) for i in project.repositories] + server = Renku2UserServer( + user=user, + image=image, + project_id=str(launcher.project_id), + launcher_id=body.launcher_id, + server_name=server_name, + server_options=parsed_server_options, + environment_variables={}, + user_secrets=user_secrets, + cloudstorage=cloud_storage, + k8s_client=self.nb_config.k8s_v2_client, + workspace_mount_path=work_dir, + work_dir=work_dir, + repositories=repositories, + config=self.nb_config, + using_default_image=self.nb_config.sessions.default_image == image, + is_image_private=False, + internal_gitlab_user=internal_gitlab_user, + ) + cert_init, cert_vols = init_containers.certificates_container(self.nb_config) + session_init_containers = [InitContainer.model_validate(self.nb_config.k8s_v2_client.sanitize(cert_init))] + extra_volumes = [ExtraVolume.model_validate(self.nb_config.k8s_v2_client.sanitize(i)) for i in cert_vols] + if isinstance(user, AuthenticatedAPIUser): + extra_volumes.append( + ExtraVolume( + name="renku-authorized-emails", + secret=SecretAsVolume( + secretName=server_name, + items=[SecretAsVolumeItem(key="authorized_emails", path="authorized_emails")], + ), + ) + ) + git_clone = await init_containers.git_clone_container_v2(server) + if git_clone is not None: + session_init_containers.append(InitContainer.model_validate(git_clone)) + extra_containers: list[ExtraContainer] = [] + git_proxy_container = await git_proxy.main_container(server) + if git_proxy_container is not None: + extra_containers.append( + ExtraContainer.model_validate(self.nb_config.k8s_v2_client.sanitize(git_proxy_container)) + ) + + parsed_server_url = urlparse(server.server_url) + annotations: dict[str, str] = { + "renku.io/project_id": str(launcher.project_id), + "renku.io/launcher_id": body.launcher_id, + "renku.io/resource_class_id": str(body.resource_class_id or default_resource_class.id), + } + manifest = AmaltheaSessionV1Alpha1( + metadata=Metadata(name=server_name, annotations=annotations), + spec=AmaltheaSessionSpec( + codeRepositories=[], + dataSources=[], + hibernated=False, + session=Session( + image=image, + urlPath=parsed_server_url.path, + port=environment.port, + storage=Storage( + className=self.nb_config.sessions.storage.pvs_storage_class, + size=str(body.disk_storage) + "G", + mountPath=environment.mount_directory.as_posix(), + ), + workingDir=environment.working_directory.as_posix(), + runAsUser=environment.uid, + runAsGroup=environment.gid, + resources=Resources(claims=None, requests=None, limits=None), + extraVolumeMounts=[], + command=environment.command, + args=environment.args, + shmSize="1G", + env=[ + SessionEnvItem(name="RENKU_BASE_URL_PATH", value=parsed_server_url.path), + SessionEnvItem(name="RENKU_BASE_URL", value=server.server_url), + ], + ), + ingress=Ingress( + host=self.nb_config.sessions.ingress.host, + ingressClassName=self.nb_config.sessions.ingress.annotations.get("kubernetes.io/ingress.class"), + annotations=self.nb_config.sessions.ingress.annotations, + tlsSecret=TlsSecret(adopt=False, name=self.nb_config.sessions.ingress.tls_secret) + if self.nb_config.sessions.ingress.tls_secret is not None + else None, + ), + extraContainers=extra_containers, + initContainers=session_init_containers, + extraVolumes=extra_volumes, + culling=Culling( + maxAge=f"{self.nb_config.sessions.culling.registered.max_age_seconds}s", + maxFailedDuration=f"{self.nb_config.sessions.culling.registered.failed_seconds}s", + maxHibernatedDuration=f"{self.nb_config.sessions.culling.registered.hibernated_seconds}s", + maxIdleDuration=f"{self.nb_config.sessions.culling.registered.idle_seconds}s", + maxStartingDuration=f"{self.nb_config.sessions.culling.registered.pending_seconds}s", + ), + authentication=Authentication( + enabled=True, + type=AuthenticationType.oauth2proxy + if isinstance(user, AuthenticatedAPIUser) + else AuthenticationType.token, + secretRef=SecretRef(name=server_name, key="auth", adopt=True), + extraVolumeMounts=[ + ExtraVolumeMount(name="renku-authorized-emails", mountPath="/authorized_emails") + ] + if isinstance(user, AuthenticatedAPIUser) + else [], + ), + ), + ) + parsed_proxy_url = urlparse(urljoin(server.server_url + "/", "oauth2")) + secret_data = {} + if isinstance(user, AuthenticatedAPIUser): + secret_data["auth"] = dumps( + { + "provider": "oidc", + "client_id": self.nb_config.sessions.oidc.client_id, + "oidc_issuer_url": self.nb_config.sessions.oidc.issuer_url, + "session_cookie_minimal": True, + "skip_provider_button": True, + "redirect_url": urljoin(server.server_url + "/", "oauth2/callback"), + "cookie_path": parsed_server_url.path, + "proxy_prefix": parsed_proxy_url.path, + "authenticated_emails_file": "/authorized_emails/authorized_emails", + "client_secret": self.nb_config.sessions.oidc.client_secret, + "cookie_secret": base64.urlsafe_b64encode(os.urandom(32)).decode(), + "insecure_oidc_allow_unverified_email": self.nb_config.sessions.oidc.allow_unverified_email, + } + ) + secret_data["authorized_emails"] = user.email + else: + secret_data["auth"] = safe_dump( + { + "token": user.id, + "cookie_key": "Renku-Auth-Anon-Id", + "verbose": True, + } + ) + secret = V1Secret(metadata=V1ObjectMeta(name=server_name), string_data=secret_data) + secret = await self.nb_config.k8s_v2_client.create_secret(secret) + try: + manifest = await self.nb_config.k8s_v2_client.create_server(manifest, user.id) + except Exception: + await self.nb_config.k8s_v2_client.delete_secret(secret.metadata.name) + raise errors.ProgrammingError(message="Could not start the amalthea session") + + return json(manifest.as_apispec().model_dump(mode="json", exclude_none=True), 201) + + return "/sessions", ["POST"], _handler + + def get_all(self) -> BlueprintFactoryResponse: + """Get all sessions for a user.""" + + @authenticate(self.authenticator) + async def _handler(_: Request, user: AuthenticatedAPIUser | AnonymousAPIUser) -> HTTPResponse: + sessions = await self.nb_config.k8s_v2_client.list_servers(user.id) + output: list[dict] = [] + for session in sessions: + output.append(session.as_apispec().model_dump(exclude_none=True, mode="json")) + return json(output) + + return "/sessions", ["GET"], _handler + + def get_one(self) -> BlueprintFactoryResponse: + """Get a specific session for a user.""" + + @authenticate(self.authenticator) + async def _handler(_: Request, user: AuthenticatedAPIUser | AnonymousAPIUser, session_id: str) -> HTTPResponse: + session = await self.nb_config.k8s_v2_client.get_server(session_id, user.id) + if session is None: + raise errors.ValidationError(message=f"The session with ID {session_id} does not exist.", quiet=True) + return json(session.as_apispec().model_dump(exclude_none=True, mode="json")) + + return "/sessions/", ["GET"], _handler + + def delete(self) -> BlueprintFactoryResponse: + """Fully delete a session with the new operator.""" + + @authenticate(self.authenticator) + async def _handler(_: Request, user: AuthenticatedAPIUser | AnonymousAPIUser, session_id: str) -> HTTPResponse: + await self.nb_config.k8s_v2_client.delete_server(session_id, user.id) + return empty() + + return "/sessions/", ["DELETE"], _handler + + def patch(self) -> BlueprintFactoryResponse: + """Patch a session.""" + + @authenticate(self.authenticator) + @validate(json=apispec.SessionPatchRequest) + async def _handler( + _: Request, + user: AuthenticatedAPIUser | AnonymousAPIUser, + session_id: str, + body: apispec.SessionPatchRequest, + ) -> HTTPResponse: + session = await self.nb_config.k8s_v2_client.get_server(session_id, user.id) + if session is None: + raise errors.MissingResourceError( + message=f"The sesison with ID {session_id} does not exist", quiet=True + ) + # TODO: Some patching should only be done when the session is in some states to avoid inadvertent restarts + patches: dict[str, Any] = {} + if body.resource_class_id is not None: + rcs = await self.rp_repo.get_classes(user, id=body.resource_class_id) + if len(rcs) == 0: + raise errors.MissingResourceError( + message=f"The resource class you requested with ID {body.resource_class_id} does not exist", + quiet=True, + ) + rc = rcs[0] + patches |= dict( + spec=dict( + session=dict( + resources=dict(requests=dict(cpu=f"{round(rc.cpu * 1000)}m", memory=f"{rc.memory}Gi")) + ) + ) + ) + # TODO: Add a config to specifiy the gpu kind, there is also GpuKind enum in reosurce_pools + patches["spec"]["session"]["resources"]["requests"]["nvidia.com/gpu"] = rc.gpu + # NOTE: K8s fails if the gpus limit is not equal to the requests because it cannot be overcommited + patches["spec"]["session"]["resources"]["limits"] = {"nvidia.com/gpu": rc.gpu} + if ( + body.state is not None + and body.state.value.lower() == State.Hibernated.value.lower() + and body.state.value.lower() != session.status.state.value.lower() + ): + if "spec" not in patches: + patches["spec"] = {} + patches["spec"]["hibernated"] = True + elif ( + body.state is not None + and body.state.value.lower() == State.Running.value.lower() + and session.status.state.value.lower() != body.state.value.lower() + ): + if "spec" not in patches: + patches["spec"] = {} + patches["spec"]["hibernated"] = False + + if len(patches) > 0: + new_session = await self.nb_config.k8s_v2_client.patch_server(session_id, user.id, patches) + else: + new_session = session + + return json(new_session.as_apispec().model_dump(exclude_none=True, mode="json")) + + return "/sessions/", ["PATCH"], _handler + + def logs(self) -> BlueprintFactoryResponse: + """Get logs from the session.""" + + @authenticate(self.authenticator) + @validate(query=apispec.SessionsSessionIdLogsGetParametersQuery) + async def _handler( + _: Request, + user: AuthenticatedAPIUser | AnonymousAPIUser, + session_id: str, + query: apispec.SessionsSessionIdLogsGetParametersQuery, + ) -> HTTPResponse: + logs = await self.nb_config.k8s_v2_client.get_server_logs(session_id, user.id, query.max_lines) + return json(apispec.SessionLogsResponse.model_validate(logs).model_dump_json(exclude_none=True)) + + return "/sessions//logs", ["GET"], _handler diff --git a/components/renku_data_services/notebooks/config/__init__.py b/components/renku_data_services/notebooks/config/__init__.py index 5d368f00c..7b3413690 100644 --- a/components/renku_data_services/notebooks/config/__init__.py +++ b/components/renku_data_services/notebooks/config/__init__.py @@ -4,6 +4,12 @@ from dataclasses import dataclass, field from typing import Any, Optional, Protocol, Self +from renku_data_services.base_models import APIUser +from renku_data_services.crc.db import ResourcePoolRepository +from renku_data_services.crc.models import ResourceClass +from renku_data_services.db_config.config import DBConfig +from renku_data_services.k8s.clients import K8sCoreClient, K8sSchedulingClient +from renku_data_services.k8s.quota import QuotaRepository from renku_data_services.notebooks.api.classes.data_service import ( CloudStorageConfig, CRCValidator, @@ -13,12 +19,18 @@ GitProviderHelper, StorageValidator, ) -from renku_data_services.notebooks.api.classes.k8s_client import JsServerCache, K8sClient, NamespacedK8sClient +from renku_data_services.notebooks.api.classes.k8s_client import ( + AmaltheaSessionV1Alpha1Kr8s, + JupyterServerV1Alpha1Kr8s, + K8sClient, + NamespacedK8sClient, + ServerCache, +) from renku_data_services.notebooks.api.classes.repository import GitProvider -from renku_data_services.notebooks.api.classes.user import User from renku_data_services.notebooks.api.schemas.server_options import ServerOptions from renku_data_services.notebooks.config.dynamic import ( _AmaltheaConfig, + _AmaltheaV2Config, _CloudStorage, _GitConfig, _K8sConfig, @@ -29,25 +41,28 @@ _UserSecrets, ) from renku_data_services.notebooks.config.static import _ServersGetEndpointAnnotations +from renku_data_services.notebooks.crs import AmaltheaSessionV1Alpha1, JupyterServerV1Alpha1 class CRCValidatorProto(Protocol): """Compute resource control validator.""" - def validate_class_storage( + async def validate_class_storage( self, - user: User, + user: APIUser, class_id: int, storage: Optional[int] = None, ) -> ServerOptions: """Validate the resource class storage for the session.""" ... - def get_default_class(self) -> dict[str, Any]: + async def get_default_class(self) -> ResourceClass: """Get the default resource class.""" ... - def find_acceptable_class(self, user: User, requested_server_options: ServerOptions) -> Optional[ServerOptions]: + async def find_acceptable_class( + self, user: APIUser, requested_server_options: ServerOptions + ) -> Optional[ServerOptions]: """Find a suitable resource class based on resource requirements.""" ... @@ -55,15 +70,17 @@ def find_acceptable_class(self, user: User, requested_server_options: ServerOpti class StorageValidatorProto(Protocol): """Cloud storage validator protocol.""" - def get_storage_by_id(self, user: User, project_id: int, storage_id: str) -> CloudStorageConfig: + async def get_storage_by_id( + self, user: APIUser, internal_gitlab_user: APIUser, project_id: int, storage_id: str + ) -> CloudStorageConfig: """Get storage by ID.""" ... - def validate_storage_configuration(self, configuration: dict[str, Any], source_path: str) -> None: + async def validate_storage_configuration(self, configuration: dict[str, Any], source_path: str) -> None: """Validate a storage configuration.""" ... - def obscure_password_fields_for_storage(self, configuration: dict[str, Any]) -> dict[str, Any]: + async def obscure_password_fields_for_storage(self, configuration: dict[str, Any]) -> dict[str, Any]: """Obscure passsword fields in storage credentials.""" ... @@ -71,7 +88,7 @@ def obscure_password_fields_for_storage(self, configuration: dict[str, Any]) -> class GitProviderHelperProto(Protocol): """Git provider protocol.""" - def get_providers(self, user: User) -> list[GitProvider]: + async def get_providers(self, user: APIUser) -> list[GitProvider]: """Get a list of git providers.""" ... @@ -89,7 +106,8 @@ class _NotebooksConfig: crc_validator: CRCValidatorProto storage_validator: StorageValidatorProto git_provider_helper: GitProviderHelperProto - k8s_client: K8sClient + k8s_client: K8sClient[JupyterServerV1Alpha1, JupyterServerV1Alpha1Kr8s] + k8s_v2_client: K8sClient[AmaltheaSessionV1Alpha1, AmaltheaSessionV1Alpha1Kr8s] current_resource_schema_version: int = 1 anonymous_sessions_enabled: bool = False ssh_enabled: bool = False @@ -103,54 +121,64 @@ class _NotebooksConfig: ) @classmethod - def from_env(cls) -> Self: - dummy_stores = _parse_str_as_bool(os.environ.get("NB_DUMMY_STORES", False)) - sessions_config = _SessionConfig.from_env() - git_config = _GitConfig.from_env() - data_service_url = os.environ["NB_DATA_SERVICE_URL"] + def from_env(cls, db_config: DBConfig) -> Self: + dummy_stores = _parse_str_as_bool(os.environ.get("DUMMY_STORES", False)) + sessions_config: _SessionConfig + git_config: _GitConfig + data_service_url = os.environ.get("NB_DATA_SERVICE_URL", "http://127.0.0.1:8000") server_options = _ServerOptionsConfig.from_env() crc_validator: CRCValidatorProto storage_validator: StorageValidatorProto git_provider_helper: GitProviderHelperProto + k8s_namespace = os.environ.get("K8S_NAMESPACE", "default") + quota_repo: QuotaRepository if dummy_stores: crc_validator = DummyCRCValidator() + sessions_config = _SessionConfig._for_testing() storage_validator = DummyStorageValidator() git_provider_helper = DummyGitProviderHelper() + amalthea_config = _AmaltheaConfig(cache_url="http://not.specified") + amalthea_v2_config = _AmaltheaV2Config(cache_url="http://not.specified") + git_config = _GitConfig("http://not.specified", "registry.not.specified") else: - crc_validator = CRCValidator(data_service_url) + quota_repo = QuotaRepository(K8sCoreClient(), K8sSchedulingClient(), namespace=k8s_namespace) + rp_repo = ResourcePoolRepository(db_config.async_session_maker, quota_repo) + crc_validator = CRCValidator(rp_repo) + sessions_config = _SessionConfig.from_env() storage_validator = StorageValidator(data_service_url) - git_provider_helper = GitProviderHelper(data_service_url, sessions_config.ingress.host, git_config.url) + amalthea_config = _AmaltheaConfig.from_env() + amalthea_v2_config = _AmaltheaV2Config.from_env() + git_config = _GitConfig.from_env() + git_provider_helper = GitProviderHelper( + data_service_url, f"http://{sessions_config.ingress.host}", git_config.url + ) k8s_config = _K8sConfig.from_env() - amalthea_config = _AmaltheaConfig.from_env() renku_ns_client = NamespacedK8sClient( - k8s_config.renku_namespace, - amalthea_config.group, - amalthea_config.version, - amalthea_config.plural, + k8s_config.renku_namespace, JupyterServerV1Alpha1, JupyterServerV1Alpha1Kr8s ) - session_ns_client = None - if k8s_config.sessions_namespace: - session_ns_client = NamespacedK8sClient( - k8s_config.sessions_namespace, - amalthea_config.group, - amalthea_config.version, - amalthea_config.plural, - ) - js_cache = JsServerCache(amalthea_config.cache_url) + js_cache = ServerCache(amalthea_config.cache_url, JupyterServerV1Alpha1) k8s_client = K8sClient( - js_cache=js_cache, + cache=js_cache, renku_ns_client=renku_ns_client, - session_ns_client=session_ns_client, + username_label="renku.io/safe-username", + ) + v2_cache = ServerCache(amalthea_v2_config.cache_url, AmaltheaSessionV1Alpha1) + renku_ns_v2_client = NamespacedK8sClient( + k8s_config.renku_namespace, AmaltheaSessionV1Alpha1, AmaltheaSessionV1Alpha1Kr8s + ) + k8s_v2_client = K8sClient( + cache=v2_cache, + renku_ns_client=renku_ns_v2_client, username_label="renku.io/safe-username", ) return cls( server_options=server_options, - sessions=_SessionConfig.from_env(), - amalthea=_AmaltheaConfig.from_env(), + sessions=sessions_config, + amalthea=amalthea_config, sentry=_SentryConfig.from_env(), - git=_GitConfig.from_env(), - k8s=_K8sConfig.from_env(), + git=git_config, + k8s=k8s_config, cloud_storage=_CloudStorage.from_env(), user_secrets=_UserSecrets.from_env(), current_resource_schema_version=1, @@ -164,4 +192,5 @@ def from_env(cls) -> Self: storage_validator=storage_validator, git_provider_helper=git_provider_helper, k8s_client=k8s_client, + k8s_v2_client=k8s_v2_client, ) diff --git a/components/renku_data_services/notebooks/config/dynamic.py b/components/renku_data_services/notebooks/config/dynamic.py index e08eca830..b30388182 100644 --- a/components/renku_data_services/notebooks/config/dynamic.py +++ b/components/renku_data_services/notebooks/config/dynamic.py @@ -11,6 +11,8 @@ from ..api.schemas.config_server_options import ServerOptionsChoices, ServerOptionsDefaults +latest_version: str = "1.25.3" + def _parse_str_as_bool(val: Union[str, bool]) -> bool: if isinstance(val, str): @@ -97,11 +99,11 @@ def from_env(cls) -> Self: @dataclass class _GitProxyConfig: - sentry: _SentryConfig renku_client_secret: str = field(repr=False) + sentry: _SentryConfig = field(default_factory=_SentryConfig.from_env) port: int = 8080 health_port: int = 8081 - image: str = "renku/git-https-proxy:latest" + image: str = f"renku/git-https-proxy:{latest_version}" renku_client_id: str = "renku" @classmethod @@ -112,16 +114,16 @@ def from_env(cls) -> Self: sentry=_SentryConfig.from_env(prefix="NB_SESSIONS__GIT_PROXY__"), port=_parse_value_as_int(os.environ.get("NB_SESSIONS__GIT_PROXY__PORT", 8080)), health_port=_parse_value_as_int(os.environ.get("NB_SESSIONS__GIT_PROXY__HEALTH_PORT", 8081)), - image=os.environ.get("NB_SESSIONS__GIT_PROXY__IMAGE", "renku/git-https-proxy:latest"), + image=os.environ.get("NB_SESSIONS__GIT_PROXY__IMAGE", f"renku/git-https-proxy:{latest_version}"), ) @dataclass class _GitRpcServerConfig: - sentry: _SentryConfig + sentry: _SentryConfig = field(default_factory=_SentryConfig.from_env) host: str = "0.0.0.0" # nosec B104 port: int = 4000 - image: str = "renku/git-rpc-server:latest" + image: str = f"renku/git-rpc-server:{latest_version}" def __post_init__(self) -> None: self.port = _parse_value_as_int(self.port) @@ -129,7 +131,7 @@ def __post_init__(self) -> None: @classmethod def from_env(cls) -> Self: return cls( - image=os.environ.get("NB_SESSIONS__GIT_RPC_SERVER__IMAGE", "renku/git-rpc-server:latest"), + image=os.environ.get("NB_SESSIONS__GIT_RPC_SERVER__IMAGE", f"renku/git-rpc-server:{latest_version}"), host=os.environ.get("NB_SESSIONS__GIT_RPC_SERVER__HOST", "0.0.0.0"), # nosec B104 port=_parse_value_as_int(os.environ.get("NB_SESSIONS__GIT_RPC_SERVER__PORT", 4000)), sentry=_SentryConfig.from_env(prefix="NB_SESSIONS__GIT_RPC_SERVER__"), @@ -138,13 +140,13 @@ def from_env(cls) -> Self: @dataclass class _GitCloneConfig: - image: str = "renku/git-clone:latest" + image: str = f"renku/git-clone:{latest_version}" sentry: _SentryConfig = field(default_factory=lambda: _SentryConfig(enabled=False)) @classmethod def from_env(cls) -> Self: return cls( - image=os.environ.get("NB_SESSIONS__GIT_CLONE__IMAGE", "renku/git-rpc-server:latest"), + image=os.environ.get("NB_SESSIONS__GIT_CLONE__IMAGE", f"renku/git-clone:{latest_version}"), sentry=_SentryConfig.from_env(prefix="NB_SESSIONS__GIT_CLONE__"), ) @@ -171,9 +173,9 @@ class _SessionOidcConfig: client_secret: str = field(repr=False) token_url: str auth_url: str + issuer_url: str client_id: str = "renku-jupyterserver" allow_unverified_email: Union[str, bool] = False - config_url: str = "/auth/realms/Renku/.well-known/openid-configuration" def __post_init__(self) -> None: self.allow_unverified_email = _parse_str_as_bool(self.allow_unverified_email) @@ -188,9 +190,7 @@ def from_env(cls) -> Self: os.environ.get("NB_SESSIONS__OIDC__ALLOW_UNVERIFIED_EMAIL", False) ), client_id=os.environ.get("NB_SESSIONS__OIDC__CLIENT_ID", "renku-jupyterserver"), - config_url=os.environ.get( - "NB_SESSIONS__OIDC__CONFIG_URL", "/auth/realms/Renku/.well-known/openid-configuration" - ), + issuer_url=os.environ["NB_SESSIONS__OIDC__ISSUER_URL"], ) @@ -203,7 +203,7 @@ class _CustomCaCertsConfig: @classmethod def from_env(cls) -> Self: return cls( - image=os.environ.get("NB_SESSIONS__CA_CERTS__IMAGE", "renku-jupyterserver"), + image=os.environ.get("NB_SESSIONS__CA_CERTS__IMAGE", "renku/certificates:0.0.2"), path=os.environ.get("NB_SESSIONS__CA_CERTS__PATH", "/auth/realms/Renku/.well-known/openid-configuration"), secrets=yaml.safe_load(StringIO(os.environ.get("NB_SESSIONS__CA_CERTS__SECRETS", "[]"))), ) @@ -226,6 +226,23 @@ def from_env(cls) -> Self: ) +@dataclass +class _AmaltheaV2Config: + cache_url: str + group: str = "amalthea.dev" + version: str = "v1alpha1" + plural: str = "amaltheasessions" + + @classmethod + def from_env(cls) -> Self: + return cls( + cache_url=os.environ["NB_AMALTHEA_V2__CACHE_URL"], + group=os.environ.get("NB_AMALTHEA_V2__GROUP", "amalthea.dev"), + version=os.environ.get("NB_AMALTHEA_V2__VERSION", "v1alpha1"), + plural=os.environ.get("NB_AMALTHEA_V2__PLURAL", "amaltheasessions"), + ) + + @dataclass class _SessionIngress: host: str @@ -389,20 +406,44 @@ def from_env(cls) -> Self: tolerations=yaml.safe_load(StringIO(os.environ.get("", "[]"))), ) + @classmethod + def _for_testing(cls) -> Self: + return cls( + culling=_SessionCullingConfig.from_env(), + git_proxy=_GitProxyConfig(renku_client_secret="not-defined"), # nosec B106 + git_rpc_server=_GitRpcServerConfig.from_env(), + git_clone=_GitCloneConfig.from_env(), + ingress=_SessionIngress(host="localhost"), + ca_certs=_CustomCaCertsConfig.from_env(), + oidc=_SessionOidcConfig( + client_id="not-defined", + client_secret="not-defined", # nosec B106 + token_url="http://not.defined", + auth_url="http://not.defined", + issuer_url="http://not.defined", + ), + storage=_SessionStorageConfig.from_env(), + containers=_SessionContainers.from_env(), + ssh=_SessionSshConfig.from_env(), + default_image=os.environ.get("", "renku/singleuser:latest"), + enforce_cpu_limits=CPUEnforcement(os.environ.get("", "off")), + termination_warning_duration_seconds=_parse_value_as_int(os.environ.get("", 12 * 60 * 60)), + image_default_workdir="/home/jovyan", + node_selector=yaml.safe_load(StringIO(os.environ.get("", "{}"))), + affinity=yaml.safe_load(StringIO(os.environ.get("", "{}"))), + tolerations=yaml.safe_load(StringIO(os.environ.get("", "[]"))), + ) + @dataclass class _K8sConfig: """Defines the k8s client and namespace.""" - renku_namespace: str - sessions_namespace: Optional[str] = None + renku_namespace: str = "default" @classmethod def from_env(cls) -> Self: - return cls( - renku_namespace=os.environ["KUBERNETES_NAMESPACE"], - sessions_namespace=os.environ.get("SESSIONS_NAMESPACE"), - ) + return cls(renku_namespace=os.environ.get("KUBERNETES_NAMESPACE", "default")) @dataclass @@ -449,7 +490,7 @@ def from_env(cls) -> Self: @dataclass class _UserSecrets: - image: str = "renku/secrets_mount:latest" + image: str = f"renku/secrets_mount:{latest_version}" secrets_storage_service_url: str = "http://renku-secrets-storage" def __post_init__(self) -> None: @@ -458,7 +499,7 @@ def __post_init__(self) -> None: @classmethod def from_env(cls) -> Self: return cls( - image=os.environ.get("NB_USER_SECRETS__IMAGE", "renku/secrets_mount:latest"), + image=os.environ.get("NB_USER_SECRETS__IMAGE", f"renku/secrets_mount:{latest_version}"), secrets_storage_service_url=os.environ.get( "NB_USER_SECRETS__SECRETS_STORAGE_SERVICE_URL", "http://renku-secrets-storage" ), diff --git a/components/renku_data_services/notebooks/config/static.py b/components/renku_data_services/notebooks/config/static.py index e31670f86..956401387 100644 --- a/components/renku_data_services/notebooks/config/static.py +++ b/components/renku_data_services/notebooks/config/static.py @@ -88,7 +88,7 @@ def __post_init__(self) -> None: annotation.get_field_name(sanitized=True): annotation.to_marshmallow_field() for annotation in self.annotations } - )(uknown=INCLUDE) + )(unknown=INCLUDE) def sanitize_dict(self, ann_dict: dict[str, str]) -> dict[str, str]: return cast(dict[str, str], self.schema().load(ann_dict)) diff --git a/components/renku_data_services/notebooks/cr_amalthea_session.py b/components/renku_data_services/notebooks/cr_amalthea_session.py new file mode 100644 index 000000000..16aa355e0 --- /dev/null +++ b/components/renku_data_services/notebooks/cr_amalthea_session.py @@ -0,0 +1,2884 @@ +# generated by datamodel-codegen: +# filename: +# timestamp: 2024-09-04T22:45:28+00:00 + +from __future__ import annotations + +from datetime import datetime +from enum import Enum +from typing import Any, Dict, List, Optional, Union + +from pydantic import ConfigDict, Field +from renku_data_services.notebooks.cr_base import BaseCRD + + +class ExtraVolumeMount(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + mountPath: str = Field( + ..., + description="Path within the container at which the volume should be mounted. Must\nnot contain ':'.", + ) + mountPropagation: Optional[str] = Field( + default=None, + description="mountPropagation determines how mounts are propagated from the host\nto container and the other way around.\nWhen not set, MountPropagationNone is used.\nThis field is beta in 1.10.", + ) + name: str = Field(..., description="This must match the Name of a Volume.") + readOnly: Optional[bool] = Field( + default=None, + description="Mounted read-only if true, read-write otherwise (false or unspecified).\nDefaults to false.", + ) + subPath: Optional[str] = Field( + default=None, + description="Path within the volume from which the container's volume should be mounted.\nDefaults to \"\" (volume's root).", + ) + subPathExpr: Optional[str] = Field( + default=None, + description="Expanded path within the volume from which the container's volume should be mounted.\nBehaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment.\nDefaults to \"\" (volume's root).\nSubPathExpr and SubPath are mutually exclusive.", + ) + + +class SecretRef(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + adopt: Optional[bool] = Field( + default=None, + description="If the secret is adopted then the operator will delete the secret when the custom resource that uses it is deleted.", + ) + key: str + name: str + + +class Type(Enum): + token = "token" + oauth2proxy = "oauth2proxy" + + +class Authentication(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + enabled: bool = True + extraVolumeMounts: Optional[List[ExtraVolumeMount]] = Field( + default=None, + description="Additional volume mounts for the authentication container.", + ) + secretRef: SecretRef = Field( + ..., + description="Kubernetes secret that contains the authentication configuration\nFor `token` a yaml file with the following keys is required:\n - token: the token value used to authenticate the user\n - cookie_key: the name of the cookie where the token will be saved and searched for\nFor `oauth2proxy` please see https://oauth2-proxy.github.io/oauth2-proxy/configuration/overview#config-file.\nNote that the `upstream` and `http_address` configuration options cannot be set from the secret because\nthe operator knows how to set these options to the proper values.", + ) + type: Type + + +class CloningConfigSecretRef(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + adopt: Optional[bool] = Field( + default=None, + description="If the secret is adopted then the operator will delete the secret when the custom resource that uses it is deleted.", + ) + key: str + name: str + + +class ConfigSecretRef(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + adopt: Optional[bool] = Field( + default=None, + description="If the secret is adopted then the operator will delete the secret when the custom resource that uses it is deleted.", + ) + key: str + name: str + + +class Type1(Enum): + git = "git" + + +class CodeRepository(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + clonePath: str = Field( + default=".", + description="Path relative to the session working directory where the repository should be cloned into.", + example="repositories/project1", + ) + cloningConfigSecretRef: Optional[CloningConfigSecretRef] = Field( + default=None, + description="The Kubernetes secret that contains the code repository configuration to be used during cloning.\nFor 'git' this should contain either:\nThe username and password\nThe private key and its corresponding password\nAn empty value can be used when cloning from public repositories using the http protocol\nNOTE: you have to specify the whole config in a single key in the secret.", + ) + configSecretRef: Optional[ConfigSecretRef] = Field( + default=None, + description="The Kubernetes secret that contains the code repository configuration to be used when the session is running.\nFor 'git' this is the git configuration which can be used to inject credentials in addition to any other repo-specific Git configuration.\nNOTE: you have to specify the whole config in a single key in the secret.", + ) + remote: str = Field( + ..., + description="The HTTP url to the code repository", + example="https://github.com/SwissDataScienceCenter/renku", + ) + revision: Optional[str] = Field( + default=None, + description="The tag, branch or commit SHA to checkout, if omitted then will be the tip of the default branch of the repo", + example="main", + ) + type: Type1 = Field( + default="git", + description="The type of the code repository - currently the only supported kind is git.", + ) + + +class Culling(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + maxAge: Optional[str] = Field( + default=None, + description='The maximum allowed age for a session, regardless of whether it\nis active or not. When the threshold is reached the session is hibernated.\nA value of zero indicates that Amalthea will not automatically hibernate\nthe session based on its age.\nGolang\'s time.ParseDuration is used to parse this, so values like 2h5min will work,\nvalid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".', + ) + maxFailedDuration: Optional[str] = Field( + default=None, + description='How long can a server be in failed state before it gets hibernated. A\nvalue of zero indicates that the server will not be automatically\nhibernated by Amalthea if it is failing.\nGolang\'s time.ParseDuration is used to parse this, so values like 2h5min will work,\nvalid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".', + ) + maxHibernatedDuration: Optional[str] = Field( + default=None, + description='How long can a session be in hibernated state before\nit gets completely deleted. A value of zero indicates that hibernated servers\nwill not be automatically be deleted by Amalthea after a period of time.\nGolang\'s time.ParseDuration is used to parse this, so values like 2h5min will work,\nvalid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".', + ) + maxIdleDuration: Optional[str] = Field( + default=None, + description='How long should a server be idle for before it is hibernated. A value of\nzero indicates that Amalthea will not automatically hibernate inactive sessions.\nGolang\'s time.ParseDuration is used to parse this, so values like 2h5min will work,\nvalid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".', + ) + maxStartingDuration: Optional[str] = Field( + default=None, + description='How long can a server be in starting state before it gets hibernated. A\nvalue of zero indicates that the server will not be automatically hibernated\nby Amalthea because it took to long to start.\nGolang\'s time.ParseDuration is used to parse this, so values like 2h5min will work,\nvalid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".', + ) + + +class SecretRef1(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + adopt: Optional[bool] = Field( + default=None, + description="If the secret is adopted then the operator will delete the secret when the custom resource that uses it is deleted.", + ) + name: str + + +class Type2(Enum): + rclone = "rclone" + + +class DataSource(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + accessMode: str = Field( + default="ReadOnlyMany", description="The access mode for the data source" + ) + mountPath: str = Field( + default="data", + description="Path relative to the session working directory where the data should be mounted", + example="data/storages", + ) + secretRef: Optional[SecretRef1] = Field( + default=None, + description="The secret containing the configuration or credentials needed for access to the data.\nThe format of the configuration that is expected depends on the storage type.\nNOTE: define all values in a single key of the Kubernetes secret.\nrclone: any valid rclone configuration for a single remote, see the output of `rclone config providers` for validation and format.", + ) + type: Type2 = Field(default="rclone", description="The data source type") + + +class ConfigMapKeyRef(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + key: str = Field(..., description="The key to select.") + name: Optional[str] = Field( + default=None, + description="Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\nTODO: Add other useful fields. apiVersion, kind, uid?", + ) + optional: Optional[bool] = Field( + default=None, + description="Specify whether the ConfigMap or its key must be defined", + ) + + +class FieldRef(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + apiVersion: Optional[str] = Field( + default=None, + description='Version of the schema the FieldPath is written in terms of, defaults to "v1".', + ) + fieldPath: str = Field( + ..., description="Path of the field to select in the specified API version." + ) + + +class ResourceFieldRef(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + containerName: Optional[str] = Field( + default=None, + description="Container name: required for volumes, optional for env vars", + ) + divisor: Optional[Union[int, str]] = Field( + default=None, + description='Specifies the output format of the exposed resources, defaults to "1"', + ) + resource: str = Field(..., description="Required: resource to select") + + +class SecretKeyRef(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + key: str = Field( + ..., + description="The key of the secret to select from. Must be a valid secret key.", + ) + name: Optional[str] = Field( + default=None, + description="Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\nTODO: Add other useful fields. apiVersion, kind, uid?", + ) + optional: Optional[bool] = Field( + default=None, + description="Specify whether the Secret or its key must be defined", + ) + + +class ValueFrom(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + configMapKeyRef: Optional[ConfigMapKeyRef] = Field( + default=None, description="Selects a key of a ConfigMap." + ) + fieldRef: Optional[FieldRef] = Field( + default=None, + description="Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`,\nspec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.", + ) + resourceFieldRef: Optional[ResourceFieldRef] = Field( + default=None, + description="Selects a resource of the container: only resources limits and requests\n(limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.", + ) + secretKeyRef: Optional[SecretKeyRef] = Field( + default=None, description="Selects a key of a secret in the pod's namespace" + ) + + +class EnvItem(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + name: str = Field( + ..., description="Name of the environment variable. Must be a C_IDENTIFIER." + ) + value: Optional[str] = Field( + default=None, + description='Variable references $(VAR_NAME) are expanded\nusing the previously defined environment variables in the container and\nany service environment variables. If a variable cannot be resolved,\nthe reference in the input string will be unchanged. Double $$ are reduced\nto a single $, which allows for escaping the $(VAR_NAME) syntax: i.e.\n"$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)".\nEscaped references will never be expanded, regardless of whether the variable\nexists or not.\nDefaults to "".', + ) + valueFrom: Optional[ValueFrom] = Field( + default=None, + description="Source for the environment variable's value. Cannot be used if value is not empty.", + ) + + +class ConfigMapRef(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + name: Optional[str] = Field( + default=None, + description="Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\nTODO: Add other useful fields. apiVersion, kind, uid?", + ) + optional: Optional[bool] = Field( + default=None, description="Specify whether the ConfigMap must be defined" + ) + + +class SecretRef2(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + name: Optional[str] = Field( + default=None, + description="Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\nTODO: Add other useful fields. apiVersion, kind, uid?", + ) + optional: Optional[bool] = Field( + default=None, description="Specify whether the Secret must be defined" + ) + + +class EnvFromItem(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + configMapRef: Optional[ConfigMapRef] = Field( + default=None, description="The ConfigMap to select from" + ) + prefix: Optional[str] = Field( + default=None, + description="An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER.", + ) + secretRef: Optional[SecretRef2] = Field( + default=None, description="The Secret to select from" + ) + + +class Exec(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + command: Optional[List[str]] = Field( + default=None, + description="Command is the command line to execute inside the container, the working directory for the\ncommand is root ('/') in the container's filesystem. The command is simply exec'd, it is\nnot run inside a shell, so traditional shell instructions ('|', etc) won't work. To use\na shell, you need to explicitly call out to that shell.\nExit status of 0 is treated as live/healthy and non-zero is unhealthy.", + ) + + +class HttpHeader(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + name: str = Field( + ..., + description="The header field name.\nThis will be canonicalized upon output, so case-variant names will be understood as the same header.", + ) + value: str = Field(..., description="The header field value") + + +class HttpGet(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + host: Optional[str] = Field( + default=None, + description='Host name to connect to, defaults to the pod IP. You probably want to set\n"Host" in httpHeaders instead.', + ) + httpHeaders: Optional[List[HttpHeader]] = Field( + default=None, + description="Custom headers to set in the request. HTTP allows repeated headers.", + ) + path: Optional[str] = Field( + default=None, description="Path to access on the HTTP server." + ) + port: Union[int, str] = Field( + ..., + description="Name or number of the port to access on the container.\nNumber must be in the range 1 to 65535.\nName must be an IANA_SVC_NAME.", + ) + scheme: Optional[str] = Field( + default=None, + description="Scheme to use for connecting to the host.\nDefaults to HTTP.", + ) + + +class TcpSocket(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + host: Optional[str] = Field( + default=None, + description="Optional: Host name to connect to, defaults to the pod IP.", + ) + port: Union[int, str] = Field( + ..., + description="Number or name of the port to access on the container.\nNumber must be in the range 1 to 65535.\nName must be an IANA_SVC_NAME.", + ) + + +class PostStart(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + exec: Optional[Exec] = Field( + default=None, description="Exec specifies the action to take." + ) + httpGet: Optional[HttpGet] = Field( + default=None, description="HTTPGet specifies the http request to perform." + ) + tcpSocket: Optional[TcpSocket] = Field( + default=None, + description="Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept\nfor the backward compatibility. There are no validation of this field and\nlifecycle hooks will fail in runtime when tcp handler is specified.", + ) + + +class HttpGet1(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + host: Optional[str] = Field( + default=None, + description='Host name to connect to, defaults to the pod IP. You probably want to set\n"Host" in httpHeaders instead.', + ) + httpHeaders: Optional[List[HttpHeader]] = Field( + default=None, + description="Custom headers to set in the request. HTTP allows repeated headers.", + ) + path: Optional[str] = Field( + default=None, description="Path to access on the HTTP server." + ) + port: Union[int, str] = Field( + ..., + description="Name or number of the port to access on the container.\nNumber must be in the range 1 to 65535.\nName must be an IANA_SVC_NAME.", + ) + scheme: Optional[str] = Field( + default=None, + description="Scheme to use for connecting to the host.\nDefaults to HTTP.", + ) + + +class PreStop(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + exec: Optional[Exec] = Field( + default=None, description="Exec specifies the action to take." + ) + httpGet: Optional[HttpGet1] = Field( + default=None, description="HTTPGet specifies the http request to perform." + ) + tcpSocket: Optional[TcpSocket] = Field( + default=None, + description="Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept\nfor the backward compatibility. There are no validation of this field and\nlifecycle hooks will fail in runtime when tcp handler is specified.", + ) + + +class Lifecycle(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + postStart: Optional[PostStart] = Field( + default=None, + description="PostStart is called immediately after a container is created. If the handler fails,\nthe container is terminated and restarted according to its restart policy.\nOther management of the container blocks until the hook completes.\nMore info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks", + ) + preStop: Optional[PreStop] = Field( + default=None, + description="PreStop is called immediately before a container is terminated due to an\nAPI request or management event such as liveness/startup probe failure,\npreemption, resource contention, etc. The handler is not called if the\ncontainer crashes or exits. The Pod's termination grace period countdown begins before the\nPreStop hook is executed. Regardless of the outcome of the handler, the\ncontainer will eventually terminate within the Pod's termination grace\nperiod (unless delayed by finalizers). Other management of the container blocks until the hook completes\nor until the termination grace period is reached.\nMore info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks", + ) + + +class Grpc(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + port: int = Field( + ..., + description="Port number of the gRPC service. Number must be in the range 1 to 65535.", + ) + service: Optional[str] = Field( + default=None, + description="Service is the name of the service to place in the gRPC HealthCheckRequest\n(see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).\n\n\nIf this is not specified, the default behavior is defined by gRPC.", + ) + + +class HttpGet2(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + host: Optional[str] = Field( + default=None, + description='Host name to connect to, defaults to the pod IP. You probably want to set\n"Host" in httpHeaders instead.', + ) + httpHeaders: Optional[List[HttpHeader]] = Field( + default=None, + description="Custom headers to set in the request. HTTP allows repeated headers.", + ) + path: Optional[str] = Field( + default=None, description="Path to access on the HTTP server." + ) + port: Union[int, str] = Field( + ..., + description="Name or number of the port to access on the container.\nNumber must be in the range 1 to 65535.\nName must be an IANA_SVC_NAME.", + ) + scheme: Optional[str] = Field( + default=None, + description="Scheme to use for connecting to the host.\nDefaults to HTTP.", + ) + + +class LivenessProbe(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + exec: Optional[Exec] = Field( + default=None, description="Exec specifies the action to take." + ) + failureThreshold: Optional[int] = Field( + default=None, + description="Minimum consecutive failures for the probe to be considered failed after having succeeded.\nDefaults to 3. Minimum value is 1.", + ) + grpc: Optional[Grpc] = Field( + default=None, description="GRPC specifies an action involving a GRPC port." + ) + httpGet: Optional[HttpGet2] = Field( + default=None, description="HTTPGet specifies the http request to perform." + ) + initialDelaySeconds: Optional[int] = Field( + default=None, + description="Number of seconds after the container has started before liveness probes are initiated.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + ) + periodSeconds: Optional[int] = Field( + default=None, + description="How often (in seconds) to perform the probe.\nDefault to 10 seconds. Minimum value is 1.", + ) + successThreshold: Optional[int] = Field( + default=None, + description="Minimum consecutive successes for the probe to be considered successful after having failed.\nDefaults to 1. Must be 1 for liveness and startup. Minimum value is 1.", + ) + tcpSocket: Optional[TcpSocket] = Field( + default=None, description="TCPSocket specifies an action involving a TCP port." + ) + terminationGracePeriodSeconds: Optional[int] = Field( + default=None, + description="Optional duration in seconds the pod needs to terminate gracefully upon probe failure.\nThe grace period is the duration in seconds after the processes running in the pod are sent\na termination signal and the time when the processes are forcibly halted with a kill signal.\nSet this value longer than the expected cleanup time for your process.\nIf this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this\nvalue overrides the value provided by the pod spec.\nValue must be non-negative integer. The value zero indicates stop immediately via\nthe kill signal (no opportunity to shut down).\nThis is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.\nMinimum value is 1. spec.terminationGracePeriodSeconds is used if unset.", + ) + timeoutSeconds: Optional[int] = Field( + default=None, + description="Number of seconds after which the probe times out.\nDefaults to 1 second. Minimum value is 1.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + ) + + +class Port(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + containerPort: int = Field( + ..., + description="Number of port to expose on the pod's IP address.\nThis must be a valid port number, 0 < x < 65536.", + ) + hostIP: Optional[str] = Field( + default=None, description="What host IP to bind the external port to." + ) + hostPort: Optional[int] = Field( + default=None, + description="Number of port to expose on the host.\nIf specified, this must be a valid port number, 0 < x < 65536.\nIf HostNetwork is specified, this must match ContainerPort.\nMost containers do not need this.", + ) + name: Optional[str] = Field( + default=None, + description="If specified, this must be an IANA_SVC_NAME and unique within the pod. Each\nnamed port in a pod must have a unique name. Name for the port that can be\nreferred to by services.", + ) + protocol: str = Field( + default="TCP", + description='Protocol for port. Must be UDP, TCP, or SCTP.\nDefaults to "TCP".', + ) + + +class HttpGet3(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + host: Optional[str] = Field( + default=None, + description='Host name to connect to, defaults to the pod IP. You probably want to set\n"Host" in httpHeaders instead.', + ) + httpHeaders: Optional[List[HttpHeader]] = Field( + default=None, + description="Custom headers to set in the request. HTTP allows repeated headers.", + ) + path: Optional[str] = Field( + default=None, description="Path to access on the HTTP server." + ) + port: Union[int, str] = Field( + ..., + description="Name or number of the port to access on the container.\nNumber must be in the range 1 to 65535.\nName must be an IANA_SVC_NAME.", + ) + scheme: Optional[str] = Field( + default=None, + description="Scheme to use for connecting to the host.\nDefaults to HTTP.", + ) + + +class ReadinessProbe(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + exec: Optional[Exec] = Field( + default=None, description="Exec specifies the action to take." + ) + failureThreshold: Optional[int] = Field( + default=None, + description="Minimum consecutive failures for the probe to be considered failed after having succeeded.\nDefaults to 3. Minimum value is 1.", + ) + grpc: Optional[Grpc] = Field( + default=None, description="GRPC specifies an action involving a GRPC port." + ) + httpGet: Optional[HttpGet3] = Field( + default=None, description="HTTPGet specifies the http request to perform." + ) + initialDelaySeconds: Optional[int] = Field( + default=None, + description="Number of seconds after the container has started before liveness probes are initiated.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + ) + periodSeconds: Optional[int] = Field( + default=None, + description="How often (in seconds) to perform the probe.\nDefault to 10 seconds. Minimum value is 1.", + ) + successThreshold: Optional[int] = Field( + default=None, + description="Minimum consecutive successes for the probe to be considered successful after having failed.\nDefaults to 1. Must be 1 for liveness and startup. Minimum value is 1.", + ) + tcpSocket: Optional[TcpSocket] = Field( + default=None, description="TCPSocket specifies an action involving a TCP port." + ) + terminationGracePeriodSeconds: Optional[int] = Field( + default=None, + description="Optional duration in seconds the pod needs to terminate gracefully upon probe failure.\nThe grace period is the duration in seconds after the processes running in the pod are sent\na termination signal and the time when the processes are forcibly halted with a kill signal.\nSet this value longer than the expected cleanup time for your process.\nIf this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this\nvalue overrides the value provided by the pod spec.\nValue must be non-negative integer. The value zero indicates stop immediately via\nthe kill signal (no opportunity to shut down).\nThis is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.\nMinimum value is 1. spec.terminationGracePeriodSeconds is used if unset.", + ) + timeoutSeconds: Optional[int] = Field( + default=None, + description="Number of seconds after which the probe times out.\nDefaults to 1 second. Minimum value is 1.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + ) + + +class ResizePolicyItem(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + resourceName: str = Field( + ..., + description="Name of the resource to which this resource resize policy applies.\nSupported values: cpu, memory.", + ) + restartPolicy: str = Field( + ..., + description="Restart policy to apply when specified resource is resized.\nIf not specified, it defaults to NotRequired.", + ) + + +class Claim(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + name: str = Field( + ..., + description="Name must match the name of one entry in pod.spec.resourceClaims of\nthe Pod where this field is used. It makes that resource available\ninside a container.", + ) + + +class Resources(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + claims: Optional[List[Claim]] = Field( + default=None, + description="Claims lists the names of resources, defined in spec.resourceClaims,\nthat are used by this container.\n\n\nThis is an alpha field and requires enabling the\nDynamicResourceAllocation feature gate.\n\n\nThis field is immutable. It can only be set for containers.", + ) + limits: Optional[Dict[str, Union[int, str]]] = Field( + default=None, + description="Limits describes the maximum amount of compute resources allowed.\nMore info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", + ) + requests: Optional[Dict[str, Union[int, str]]] = Field( + default=None, + description="Requests describes the minimum amount of compute resources required.\nIf Requests is omitted for a container, it defaults to Limits if that is explicitly specified,\notherwise to an implementation-defined value. Requests cannot exceed Limits.\nMore info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", + ) + + +class Capabilities(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + add: Optional[List[str]] = Field(default=None, description="Added capabilities") + drop: Optional[List[str]] = Field(default=None, description="Removed capabilities") + + +class SeLinuxOptions(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + level: Optional[str] = Field( + default=None, + description="Level is SELinux level label that applies to the container.", + ) + role: Optional[str] = Field( + default=None, + description="Role is a SELinux role label that applies to the container.", + ) + type: Optional[str] = Field( + default=None, + description="Type is a SELinux type label that applies to the container.", + ) + user: Optional[str] = Field( + default=None, + description="User is a SELinux user label that applies to the container.", + ) + + +class SeccompProfile(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + localhostProfile: Optional[str] = Field( + default=None, + description='localhostProfile indicates a profile defined in a file on the node should be used.\nThe profile must be preconfigured on the node to work.\nMust be a descending path, relative to the kubelet\'s configured seccomp profile location.\nMust be set if type is "Localhost". Must NOT be set for any other type.', + ) + type: str = Field( + ..., + description="type indicates which kind of seccomp profile will be applied.\nValid options are:\n\n\nLocalhost - a profile defined in a file on the node should be used.\nRuntimeDefault - the container runtime default profile should be used.\nUnconfined - no profile should be applied.", + ) + + +class WindowsOptions(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + gmsaCredentialSpec: Optional[str] = Field( + default=None, + description="GMSACredentialSpec is where the GMSA admission webhook\n(https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the\nGMSA credential spec named by the GMSACredentialSpecName field.", + ) + gmsaCredentialSpecName: Optional[str] = Field( + default=None, + description="GMSACredentialSpecName is the name of the GMSA credential spec to use.", + ) + hostProcess: Optional[bool] = Field( + default=None, + description="HostProcess determines if a container should be run as a 'Host Process' container.\nAll of a Pod's containers must have the same effective HostProcess value\n(it is not allowed to have a mix of HostProcess containers and non-HostProcess containers).\nIn addition, if HostProcess is true then HostNetwork must also be set to true.", + ) + runAsUserName: Optional[str] = Field( + default=None, + description="The UserName in Windows to run the entrypoint of the container process.\nDefaults to the user specified in image metadata if unspecified.\nMay also be set in PodSecurityContext. If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence.", + ) + + +class SecurityContext(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + allowPrivilegeEscalation: Optional[bool] = Field( + default=None, + description="AllowPrivilegeEscalation controls whether a process can gain more\nprivileges than its parent process. This bool directly controls if\nthe no_new_privs flag will be set on the container process.\nAllowPrivilegeEscalation is true always when the container is:\n1) run as Privileged\n2) has CAP_SYS_ADMIN\nNote that this field cannot be set when spec.os.name is windows.", + ) + capabilities: Optional[Capabilities] = Field( + default=None, + description="The capabilities to add/drop when running containers.\nDefaults to the default set of capabilities granted by the container runtime.\nNote that this field cannot be set when spec.os.name is windows.", + ) + privileged: Optional[bool] = Field( + default=None, + description="Run container in privileged mode.\nProcesses in privileged containers are essentially equivalent to root on the host.\nDefaults to false.\nNote that this field cannot be set when spec.os.name is windows.", + ) + procMount: Optional[str] = Field( + default=None, + description="procMount denotes the type of proc mount to use for the containers.\nThe default is DefaultProcMount which uses the container runtime defaults for\nreadonly paths and masked paths.\nThis requires the ProcMountType feature flag to be enabled.\nNote that this field cannot be set when spec.os.name is windows.", + ) + readOnlyRootFilesystem: Optional[bool] = Field( + default=None, + description="Whether this container has a read-only root filesystem.\nDefault is false.\nNote that this field cannot be set when spec.os.name is windows.", + ) + runAsGroup: Optional[int] = Field( + default=None, + description="The GID to run the entrypoint of the container process.\nUses runtime default if unset.\nMay also be set in PodSecurityContext. If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence.\nNote that this field cannot be set when spec.os.name is windows.", + ) + runAsNonRoot: Optional[bool] = Field( + default=None, + description="Indicates that the container must run as a non-root user.\nIf true, the Kubelet will validate the image at runtime to ensure that it\ndoes not run as UID 0 (root) and fail to start the container if it does.\nIf unset or false, no such validation will be performed.\nMay also be set in PodSecurityContext. If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence.", + ) + runAsUser: Optional[int] = Field( + default=None, + description="The UID to run the entrypoint of the container process.\nDefaults to user specified in image metadata if unspecified.\nMay also be set in PodSecurityContext. If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence.\nNote that this field cannot be set when spec.os.name is windows.", + ) + seLinuxOptions: Optional[SeLinuxOptions] = Field( + default=None, + description="The SELinux context to be applied to the container.\nIf unspecified, the container runtime will allocate a random SELinux context for each\ncontainer. May also be set in PodSecurityContext. If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence.\nNote that this field cannot be set when spec.os.name is windows.", + ) + seccompProfile: Optional[SeccompProfile] = Field( + default=None, + description="The seccomp options to use by this container. If seccomp options are\nprovided at both the pod & container level, the container options\noverride the pod options.\nNote that this field cannot be set when spec.os.name is windows.", + ) + windowsOptions: Optional[WindowsOptions] = Field( + default=None, + description="The Windows specific settings applied to all containers.\nIf unspecified, the options from the PodSecurityContext will be used.\nIf set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.\nNote that this field cannot be set when spec.os.name is linux.", + ) + + +class HttpGet4(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + host: Optional[str] = Field( + default=None, + description='Host name to connect to, defaults to the pod IP. You probably want to set\n"Host" in httpHeaders instead.', + ) + httpHeaders: Optional[List[HttpHeader]] = Field( + default=None, + description="Custom headers to set in the request. HTTP allows repeated headers.", + ) + path: Optional[str] = Field( + default=None, description="Path to access on the HTTP server." + ) + port: Union[int, str] = Field( + ..., + description="Name or number of the port to access on the container.\nNumber must be in the range 1 to 65535.\nName must be an IANA_SVC_NAME.", + ) + scheme: Optional[str] = Field( + default=None, + description="Scheme to use for connecting to the host.\nDefaults to HTTP.", + ) + + +class StartupProbe(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + exec: Optional[Exec] = Field( + default=None, description="Exec specifies the action to take." + ) + failureThreshold: Optional[int] = Field( + default=None, + description="Minimum consecutive failures for the probe to be considered failed after having succeeded.\nDefaults to 3. Minimum value is 1.", + ) + grpc: Optional[Grpc] = Field( + default=None, description="GRPC specifies an action involving a GRPC port." + ) + httpGet: Optional[HttpGet4] = Field( + default=None, description="HTTPGet specifies the http request to perform." + ) + initialDelaySeconds: Optional[int] = Field( + default=None, + description="Number of seconds after the container has started before liveness probes are initiated.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + ) + periodSeconds: Optional[int] = Field( + default=None, + description="How often (in seconds) to perform the probe.\nDefault to 10 seconds. Minimum value is 1.", + ) + successThreshold: Optional[int] = Field( + default=None, + description="Minimum consecutive successes for the probe to be considered successful after having failed.\nDefaults to 1. Must be 1 for liveness and startup. Minimum value is 1.", + ) + tcpSocket: Optional[TcpSocket] = Field( + default=None, description="TCPSocket specifies an action involving a TCP port." + ) + terminationGracePeriodSeconds: Optional[int] = Field( + default=None, + description="Optional duration in seconds the pod needs to terminate gracefully upon probe failure.\nThe grace period is the duration in seconds after the processes running in the pod are sent\na termination signal and the time when the processes are forcibly halted with a kill signal.\nSet this value longer than the expected cleanup time for your process.\nIf this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this\nvalue overrides the value provided by the pod spec.\nValue must be non-negative integer. The value zero indicates stop immediately via\nthe kill signal (no opportunity to shut down).\nThis is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.\nMinimum value is 1. spec.terminationGracePeriodSeconds is used if unset.", + ) + timeoutSeconds: Optional[int] = Field( + default=None, + description="Number of seconds after which the probe times out.\nDefaults to 1 second. Minimum value is 1.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + ) + + +class VolumeDevice(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + devicePath: str = Field( + ..., + description="devicePath is the path inside of the container that the device will be mapped to.", + ) + name: str = Field( + ..., + description="name must match the name of a persistentVolumeClaim in the pod", + ) + + +class VolumeMount(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + mountPath: str = Field( + ..., + description="Path within the container at which the volume should be mounted. Must\nnot contain ':'.", + ) + mountPropagation: Optional[str] = Field( + default=None, + description="mountPropagation determines how mounts are propagated from the host\nto container and the other way around.\nWhen not set, MountPropagationNone is used.\nThis field is beta in 1.10.", + ) + name: str = Field(..., description="This must match the Name of a Volume.") + readOnly: Optional[bool] = Field( + default=None, + description="Mounted read-only if true, read-write otherwise (false or unspecified).\nDefaults to false.", + ) + subPath: Optional[str] = Field( + default=None, + description="Path within the volume from which the container's volume should be mounted.\nDefaults to \"\" (volume's root).", + ) + subPathExpr: Optional[str] = Field( + default=None, + description="Expanded path within the volume from which the container's volume should be mounted.\nBehaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment.\nDefaults to \"\" (volume's root).\nSubPathExpr and SubPath are mutually exclusive.", + ) + + +class ExtraContainer(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + args: Optional[List[str]] = Field( + default=None, + description='Arguments to the entrypoint.\nThe container image\'s CMD is used if this is not provided.\nVariable references $(VAR_NAME) are expanded using the container\'s environment. If a variable\ncannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced\nto a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will\nproduce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless\nof whether the variable exists or not. Cannot be updated.\nMore info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell', + ) + command: Optional[List[str]] = Field( + default=None, + description='Entrypoint array. Not executed within a shell.\nThe container image\'s ENTRYPOINT is used if this is not provided.\nVariable references $(VAR_NAME) are expanded using the container\'s environment. If a variable\ncannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced\nto a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will\nproduce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless\nof whether the variable exists or not. Cannot be updated.\nMore info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell', + ) + env: Optional[List[EnvItem]] = Field( + default=None, + description="List of environment variables to set in the container.\nCannot be updated.", + ) + envFrom: Optional[List[EnvFromItem]] = Field( + default=None, + description="List of sources to populate environment variables in the container.\nThe keys defined within a source must be a C_IDENTIFIER. All invalid keys\nwill be reported as an event when the container is starting. When a key exists in multiple\nsources, the value associated with the last source will take precedence.\nValues defined by an Env with a duplicate key will take precedence.\nCannot be updated.", + ) + image: Optional[str] = Field( + default=None, + description="Container image name.\nMore info: https://kubernetes.io/docs/concepts/containers/images\nThis field is optional to allow higher level config management to default or override\ncontainer images in workload controllers like Deployments and StatefulSets.", + ) + imagePullPolicy: Optional[str] = Field( + default=None, + description="Image pull policy.\nOne of Always, Never, IfNotPresent.\nDefaults to Always if :latest tag is specified, or IfNotPresent otherwise.\nCannot be updated.\nMore info: https://kubernetes.io/docs/concepts/containers/images#updating-images", + ) + lifecycle: Optional[Lifecycle] = Field( + default=None, + description="Actions that the management system should take in response to container lifecycle events.\nCannot be updated.", + ) + livenessProbe: Optional[LivenessProbe] = Field( + default=None, + description="Periodic probe of container liveness.\nContainer will be restarted if the probe fails.\nCannot be updated.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + ) + name: str = Field( + ..., + description="Name of the container specified as a DNS_LABEL.\nEach container in a pod must have a unique name (DNS_LABEL).\nCannot be updated.", + ) + ports: Optional[List[Port]] = Field( + default=None, + description='List of ports to expose from the container. Not specifying a port here\nDOES NOT prevent that port from being exposed. Any port which is\nlistening on the default "0.0.0.0" address inside a container will be\naccessible from the network.\nModifying this array with strategic merge patch may corrupt the data.\nFor more information See https://github.com/kubernetes/kubernetes/issues/108255.\nCannot be updated.', + ) + readinessProbe: Optional[ReadinessProbe] = Field( + default=None, + description="Periodic probe of container service readiness.\nContainer will be removed from service endpoints if the probe fails.\nCannot be updated.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + ) + resizePolicy: Optional[List[ResizePolicyItem]] = Field( + default=None, description="Resources resize policy for the container." + ) + resources: Optional[Resources] = Field( + default=None, + description="Compute Resources required by this container.\nCannot be updated.\nMore info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", + ) + restartPolicy: Optional[str] = Field( + default=None, + description='RestartPolicy defines the restart behavior of individual containers in a pod.\nThis field may only be set for init containers, and the only allowed value is "Always".\nFor non-init containers or when this field is not specified,\nthe restart behavior is defined by the Pod\'s restart policy and the container type.\nSetting the RestartPolicy as "Always" for the init container will have the following effect:\nthis init container will be continually restarted on\nexit until all regular containers have terminated. Once all regular\ncontainers have completed, all init containers with restartPolicy "Always"\nwill be shut down. This lifecycle differs from normal init containers and\nis often referred to as a "sidecar" container. Although this init\ncontainer still starts in the init container sequence, it does not wait\nfor the container to complete before proceeding to the next init\ncontainer. Instead, the next init container starts immediately after this\ninit container is started, or after any startupProbe has successfully\ncompleted.', + ) + securityContext: Optional[SecurityContext] = Field( + default=None, + description="SecurityContext defines the security options the container should be run with.\nIf set, the fields of SecurityContext override the equivalent fields of PodSecurityContext.\nMore info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/", + ) + startupProbe: Optional[StartupProbe] = Field( + default=None, + description="StartupProbe indicates that the Pod has successfully initialized.\nIf specified, no other probes are executed until this completes successfully.\nIf this probe fails, the Pod will be restarted, just as if the livenessProbe failed.\nThis can be used to provide different probe parameters at the beginning of a Pod's lifecycle,\nwhen it might take a long time to load data or warm a cache, than during steady-state operation.\nThis cannot be updated.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + ) + stdin: Optional[bool] = Field( + default=None, + description="Whether this container should allocate a buffer for stdin in the container runtime. If this\nis not set, reads from stdin in the container will always result in EOF.\nDefault is false.", + ) + stdinOnce: Optional[bool] = Field( + default=None, + description="Whether the container runtime should close the stdin channel after it has been opened by\na single attach. When stdin is true the stdin stream will remain open across multiple attach\nsessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the\nfirst client attaches to stdin, and then remains open and accepts data until the client disconnects,\nat which time stdin is closed and remains closed until the container is restarted. If this\nflag is false, a container processes that reads from stdin will never receive an EOF.\nDefault is false", + ) + terminationMessagePath: Optional[str] = Field( + default=None, + description="Optional: Path at which the file to which the container's termination message\nwill be written is mounted into the container's filesystem.\nMessage written is intended to be brief final status, such as an assertion failure message.\nWill be truncated by the node if greater than 4096 bytes. The total message length across\nall containers will be limited to 12kb.\nDefaults to /dev/termination-log.\nCannot be updated.", + ) + terminationMessagePolicy: Optional[str] = Field( + default=None, + description="Indicate how the termination message should be populated. File will use the contents of\nterminationMessagePath to populate the container status message on both success and failure.\nFallbackToLogsOnError will use the last chunk of container log output if the termination\nmessage file is empty and the container exited with an error.\nThe log output is limited to 2048 bytes or 80 lines, whichever is smaller.\nDefaults to File.\nCannot be updated.", + ) + tty: Optional[bool] = Field( + default=None, + description="Whether this container should allocate a TTY for itself, also requires 'stdin' to be true.\nDefault is false.", + ) + volumeDevices: Optional[List[VolumeDevice]] = Field( + default=None, + description="volumeDevices is the list of block devices to be used by the container.", + ) + volumeMounts: Optional[List[VolumeMount]] = Field( + default=None, + description="Pod volumes to mount into the container's filesystem.\nCannot be updated.", + ) + workingDir: Optional[str] = Field( + default=None, + description="Container's working directory.\nIf not specified, the container runtime's default will be used, which\nmight be configured in the container image.\nCannot be updated.", + ) + + +class AwsElasticBlockStore(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + fsType: Optional[str] = Field( + default=None, + description='fsType is the filesystem type of the volume that you want to mount.\nTip: Ensure that the filesystem type is supported by the host operating system.\nExamples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore\nTODO: how do we prevent errors in the filesystem from compromising the machine', + ) + partition: Optional[int] = Field( + default=None, + description='partition is the partition in the volume that you want to mount.\nIf omitted, the default is to mount by volume name.\nExamples: For volume /dev/sda1, you specify the partition as "1".\nSimilarly, the volume partition for /dev/sda is "0" (or you can leave the property empty).', + ) + readOnly: Optional[bool] = Field( + default=None, + description="readOnly value true will force the readOnly setting in VolumeMounts.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore", + ) + volumeID: str = Field( + ..., + description="volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume).\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore", + ) + + +class AzureDisk(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + cachingMode: Optional[str] = Field( + default=None, + description="cachingMode is the Host Caching mode: None, Read Only, Read Write.", + ) + diskName: str = Field( + ..., description="diskName is the Name of the data disk in the blob storage" + ) + diskURI: str = Field( + ..., description="diskURI is the URI of data disk in the blob storage" + ) + fsType: Optional[str] = Field( + default=None, + description='fsType is Filesystem type to mount.\nMust be a filesystem type supported by the host operating system.\nEx. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.', + ) + kind: Optional[str] = Field( + default=None, + description="kind expected values are Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared", + ) + readOnly: Optional[bool] = Field( + default=None, + description="readOnly Defaults to false (read/write). ReadOnly here will force\nthe ReadOnly setting in VolumeMounts.", + ) + + +class AzureFile(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + readOnly: Optional[bool] = Field( + default=None, + description="readOnly defaults to false (read/write). ReadOnly here will force\nthe ReadOnly setting in VolumeMounts.", + ) + secretName: str = Field( + ..., + description="secretName is the name of secret that contains Azure Storage Account Name and Key", + ) + shareName: str = Field(..., description="shareName is the azure share Name") + + +class SecretRef3(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + name: Optional[str] = Field( + default=None, + description="Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\nTODO: Add other useful fields. apiVersion, kind, uid?", + ) + + +class Cephfs(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + monitors: List[str] = Field( + ..., + description="monitors is Required: Monitors is a collection of Ceph monitors\nMore info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", + ) + path: Optional[str] = Field( + default=None, + description="path is Optional: Used as the mounted root, rather than the full Ceph tree, default is /", + ) + readOnly: Optional[bool] = Field( + default=None, + description="readOnly is Optional: Defaults to false (read/write). ReadOnly here will force\nthe ReadOnly setting in VolumeMounts.\nMore info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", + ) + secretFile: Optional[str] = Field( + default=None, + description="secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret\nMore info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", + ) + secretRef: Optional[SecretRef3] = Field( + default=None, + description="secretRef is Optional: SecretRef is reference to the authentication secret for User, default is empty.\nMore info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", + ) + user: Optional[str] = Field( + default=None, + description="user is optional: User is the rados user name, default is admin\nMore info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", + ) + + +class Cinder(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + fsType: Optional[str] = Field( + default=None, + description='fsType is the filesystem type to mount.\nMust be a filesystem type supported by the host operating system.\nExamples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.\nMore info: https://examples.k8s.io/mysql-cinder-pd/README.md', + ) + readOnly: Optional[bool] = Field( + default=None, + description="readOnly defaults to false (read/write). ReadOnly here will force\nthe ReadOnly setting in VolumeMounts.\nMore info: https://examples.k8s.io/mysql-cinder-pd/README.md", + ) + secretRef: Optional[SecretRef3] = Field( + default=None, + description="secretRef is optional: points to a secret object containing parameters used to connect\nto OpenStack.", + ) + volumeID: str = Field( + ..., + description="volumeID used to identify the volume in cinder.\nMore info: https://examples.k8s.io/mysql-cinder-pd/README.md", + ) + + +class Item(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + key: str = Field(..., description="key is the key to project.") + mode: Optional[int] = Field( + default=None, + description="mode is Optional: mode bits used to set permissions on this file.\nMust be an octal value between 0000 and 0777 or a decimal value between 0 and 511.\nYAML accepts both octal and decimal values, JSON requires decimal values for mode bits.\nIf not specified, the volume defaultMode will be used.\nThis might be in conflict with other options that affect the file\nmode, like fsGroup, and the result can be other mode bits set.", + ) + path: str = Field( + ..., + description="path is the relative path of the file to map the key to.\nMay not be an absolute path.\nMay not contain the path element '..'.\nMay not start with the string '..'.", + ) + + +class ConfigMap(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + defaultMode: Optional[int] = Field( + default=None, + description="defaultMode is optional: mode bits used to set permissions on created files by default.\nMust be an octal value between 0000 and 0777 or a decimal value between 0 and 511.\nYAML accepts both octal and decimal values, JSON requires decimal values for mode bits.\nDefaults to 0644.\nDirectories within the path are not affected by this setting.\nThis might be in conflict with other options that affect the file\nmode, like fsGroup, and the result can be other mode bits set.", + ) + items: Optional[List[Item]] = Field( + default=None, + description="items if unspecified, each key-value pair in the Data field of the referenced\nConfigMap will be projected into the volume as a file whose name is the\nkey and content is the value. If specified, the listed keys will be\nprojected into the specified paths, and unlisted keys will not be\npresent. If a key is specified which is not present in the ConfigMap,\nthe volume setup will error unless it is marked optional. Paths must be\nrelative and may not contain the '..' path or start with '..'.", + ) + name: Optional[str] = Field( + default=None, + description="Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\nTODO: Add other useful fields. apiVersion, kind, uid?", + ) + optional: Optional[bool] = Field( + default=None, + description="optional specify whether the ConfigMap or its keys must be defined", + ) + + +class NodePublishSecretRef(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + name: Optional[str] = Field( + default=None, + description="Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\nTODO: Add other useful fields. apiVersion, kind, uid?", + ) + + +class Csi(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + driver: str = Field( + ..., + description="driver is the name of the CSI driver that handles this volume.\nConsult with your admin for the correct name as registered in the cluster.", + ) + fsType: Optional[str] = Field( + default=None, + description='fsType to mount. Ex. "ext4", "xfs", "ntfs".\nIf not provided, the empty value is passed to the associated CSI driver\nwhich will determine the default filesystem to apply.', + ) + nodePublishSecretRef: Optional[NodePublishSecretRef] = Field( + default=None, + description="nodePublishSecretRef is a reference to the secret object containing\nsensitive information to pass to the CSI driver to complete the CSI\nNodePublishVolume and NodeUnpublishVolume calls.\nThis field is optional, and may be empty if no secret is required. If the\nsecret object contains more than one secret, all secret references are passed.", + ) + readOnly: Optional[bool] = Field( + default=None, + description="readOnly specifies a read-only configuration for the volume.\nDefaults to false (read/write).", + ) + volumeAttributes: Optional[Dict[str, str]] = Field( + default=None, + description="volumeAttributes stores driver-specific properties that are passed to the CSI\ndriver. Consult your driver's documentation for supported values.", + ) + + +class Item1(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + fieldRef: Optional[FieldRef] = Field( + default=None, + description="Required: Selects a field of the pod: only annotations, labels, name and namespace are supported.", + ) + mode: Optional[int] = Field( + default=None, + description="Optional: mode bits used to set permissions on this file, must be an octal value\nbetween 0000 and 0777 or a decimal value between 0 and 511.\nYAML accepts both octal and decimal values, JSON requires decimal values for mode bits.\nIf not specified, the volume defaultMode will be used.\nThis might be in conflict with other options that affect the file\nmode, like fsGroup, and the result can be other mode bits set.", + ) + path: str = Field( + ..., + description="Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..'", + ) + resourceFieldRef: Optional[ResourceFieldRef] = Field( + default=None, + description="Selects a resource of the container: only resources limits and requests\n(limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.", + ) + + +class DownwardAPI(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + defaultMode: Optional[int] = Field( + default=None, + description="Optional: mode bits to use on created files by default. Must be a\nOptional: mode bits used to set permissions on created files by default.\nMust be an octal value between 0000 and 0777 or a decimal value between 0 and 511.\nYAML accepts both octal and decimal values, JSON requires decimal values for mode bits.\nDefaults to 0644.\nDirectories within the path are not affected by this setting.\nThis might be in conflict with other options that affect the file\nmode, like fsGroup, and the result can be other mode bits set.", + ) + items: Optional[List[Item1]] = Field( + default=None, description="Items is a list of downward API volume file" + ) + + +class EmptyDir(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + medium: Optional[str] = Field( + default=None, + description='medium represents what type of storage medium should back this directory.\nThe default is "" which means to use the node\'s default medium.\nMust be an empty string (default) or Memory.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir', + ) + sizeLimit: Optional[Union[int, str]] = Field( + default=None, + description="sizeLimit is the total amount of local storage required for this EmptyDir volume.\nThe size limit is also applicable for memory medium.\nThe maximum usage on memory medium EmptyDir would be the minimum value between\nthe SizeLimit specified here and the sum of memory limits of all containers in a pod.\nThe default is nil which means that the limit is undefined.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir", + ) + + +class DataSource1(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + apiGroup: Optional[str] = Field( + default=None, + description="APIGroup is the group for the resource being referenced.\nIf APIGroup is not specified, the specified Kind must be in the core API group.\nFor any other third-party types, APIGroup is required.", + ) + kind: str = Field(..., description="Kind is the type of resource being referenced") + name: str = Field(..., description="Name is the name of resource being referenced") + + +class DataSourceRef(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + apiGroup: Optional[str] = Field( + default=None, + description="APIGroup is the group for the resource being referenced.\nIf APIGroup is not specified, the specified Kind must be in the core API group.\nFor any other third-party types, APIGroup is required.", + ) + kind: str = Field(..., description="Kind is the type of resource being referenced") + name: str = Field(..., description="Name is the name of resource being referenced") + namespace: Optional[str] = Field( + default=None, + description="Namespace is the namespace of resource being referenced\nNote that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details.\n(Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled.", + ) + + +class Resources1(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + claims: Optional[List[Claim]] = Field( + default=None, + description="Claims lists the names of resources, defined in spec.resourceClaims,\nthat are used by this container.\n\n\nThis is an alpha field and requires enabling the\nDynamicResourceAllocation feature gate.\n\n\nThis field is immutable. It can only be set for containers.", + ) + limits: Optional[Dict[str, Union[int, str]]] = Field( + default=None, + description="Limits describes the maximum amount of compute resources allowed.\nMore info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", + ) + requests: Optional[Dict[str, Union[int, str]]] = Field( + default=None, + description="Requests describes the minimum amount of compute resources required.\nIf Requests is omitted for a container, it defaults to Limits if that is explicitly specified,\notherwise to an implementation-defined value. Requests cannot exceed Limits.\nMore info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", + ) + + +class MatchExpression(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + key: str = Field( + ..., description="key is the label key that the selector applies to." + ) + operator: str = Field( + ..., + description="operator represents a key's relationship to a set of values.\nValid operators are In, NotIn, Exists and DoesNotExist.", + ) + values: Optional[List[str]] = Field( + default=None, + description="values is an array of string values. If the operator is In or NotIn,\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\nthe values array must be empty. This array is replaced during a strategic\nmerge patch.", + ) + + +class Selector(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + matchExpressions: Optional[List[MatchExpression]] = Field( + default=None, + description="matchExpressions is a list of label selector requirements. The requirements are ANDed.", + ) + matchLabels: Optional[Dict[str, str]] = Field( + default=None, + description='matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels\nmap is equivalent to an element of matchExpressions, whose key field is "key", the\noperator is "In", and the values array contains only "value". The requirements are ANDed.', + ) + + +class Spec1(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + accessModes: Optional[List[str]] = Field( + default=None, + description="accessModes contains the desired access modes the volume should have.\nMore info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1", + ) + dataSource: Optional[DataSource1] = Field( + default=None, + description="dataSource field can be used to specify either:\n* An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot)\n* An existing PVC (PersistentVolumeClaim)\nIf the provisioner or an external controller can support the specified data source,\nit will create a new volume based on the contents of the specified data source.\nWhen the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef,\nand dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified.\nIf the namespace is specified, then dataSourceRef will not be copied to dataSource.", + ) + dataSourceRef: Optional[DataSourceRef] = Field( + default=None, + description="dataSourceRef specifies the object from which to populate the volume with data, if a non-empty\nvolume is desired. This may be any object from a non-empty API group (non\ncore object) or a PersistentVolumeClaim object.\nWhen this field is specified, volume binding will only succeed if the type of\nthe specified object matches some installed volume populator or dynamic\nprovisioner.\nThis field will replace the functionality of the dataSource field and as such\nif both fields are non-empty, they must have the same value. For backwards\ncompatibility, when namespace isn't specified in dataSourceRef,\nboth fields (dataSource and dataSourceRef) will be set to the same\nvalue automatically if one of them is empty and the other is non-empty.\nWhen namespace is specified in dataSourceRef,\ndataSource isn't set to the same value and must be empty.\nThere are three important differences between dataSource and dataSourceRef:\n* While dataSource only allows two specific types of objects, dataSourceRef\n allows any non-core object, as well as PersistentVolumeClaim objects.\n* While dataSource ignores disallowed values (dropping them), dataSourceRef\n preserves all values, and generates an error if a disallowed value is\n specified.\n* While dataSource only allows local objects, dataSourceRef allows objects\n in any namespaces.\n(Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled.\n(Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled.", + ) + resources: Optional[Resources1] = Field( + default=None, + description="resources represents the minimum resources the volume should have.\nIf RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements\nthat are lower than previous value but must still be higher than capacity recorded in the\nstatus field of the claim.\nMore info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources", + ) + selector: Optional[Selector] = Field( + default=None, + description="selector is a label query over volumes to consider for binding.", + ) + storageClassName: Optional[str] = Field( + default=None, + description="storageClassName is the name of the StorageClass required by the claim.\nMore info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1", + ) + volumeMode: Optional[str] = Field( + default=None, + description="volumeMode defines what type of volume is required by the claim.\nValue of Filesystem is implied when not included in claim spec.", + ) + volumeName: Optional[str] = Field( + default=None, + description="volumeName is the binding reference to the PersistentVolume backing this claim.", + ) + + +class VolumeClaimTemplate(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + metadata: Optional[Dict[str, Any]] = Field( + default=None, + description="May contain labels and annotations that will be copied into the PVC\nwhen creating it. No other fields are allowed and will be rejected during\nvalidation.", + ) + spec: Spec1 = Field( + ..., + description="The specification for the PersistentVolumeClaim. The entire content is\ncopied unchanged into the PVC that gets created from this\ntemplate. The same fields as in a PersistentVolumeClaim\nare also valid here.", + ) + + +class Ephemeral(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + volumeClaimTemplate: Optional[VolumeClaimTemplate] = Field( + default=None, + description="Will be used to create a stand-alone PVC to provision the volume.\nThe pod in which this EphemeralVolumeSource is embedded will be the\nowner of the PVC, i.e. the PVC will be deleted together with the\npod. The name of the PVC will be `-` where\n`` is the name from the `PodSpec.Volumes` array\nentry. Pod validation will reject the pod if the concatenated name\nis not valid for a PVC (for example, too long).\n\n\nAn existing PVC with that name that is not owned by the pod\nwill *not* be used for the pod to avoid using an unrelated\nvolume by mistake. Starting the pod is then blocked until\nthe unrelated PVC is removed. If such a pre-created PVC is\nmeant to be used by the pod, the PVC has to updated with an\nowner reference to the pod once the pod exists. Normally\nthis should not be necessary, but it may be useful when\nmanually reconstructing a broken cluster.\n\n\nThis field is read-only and no changes will be made by Kubernetes\nto the PVC after it has been created.\n\n\nRequired, must not be nil.", + ) + + +class Fc(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + fsType: Optional[str] = Field( + default=None, + description='fsType is the filesystem type to mount.\nMust be a filesystem type supported by the host operating system.\nEx. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.\nTODO: how do we prevent errors in the filesystem from compromising the machine', + ) + lun: Optional[int] = Field( + default=None, description="lun is Optional: FC target lun number" + ) + readOnly: Optional[bool] = Field( + default=None, + description="readOnly is Optional: Defaults to false (read/write). ReadOnly here will force\nthe ReadOnly setting in VolumeMounts.", + ) + targetWWNs: Optional[List[str]] = Field( + default=None, + description="targetWWNs is Optional: FC target worldwide names (WWNs)", + ) + wwids: Optional[List[str]] = Field( + default=None, + description="wwids Optional: FC volume world wide identifiers (wwids)\nEither wwids or combination of targetWWNs and lun must be set, but not both simultaneously.", + ) + + +class FlexVolume(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + driver: str = Field( + ..., description="driver is the name of the driver to use for this volume." + ) + fsType: Optional[str] = Field( + default=None, + description='fsType is the filesystem type to mount.\nMust be a filesystem type supported by the host operating system.\nEx. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script.', + ) + options: Optional[Dict[str, str]] = Field( + default=None, + description="options is Optional: this field holds extra command options if any.", + ) + readOnly: Optional[bool] = Field( + default=None, + description="readOnly is Optional: defaults to false (read/write). ReadOnly here will force\nthe ReadOnly setting in VolumeMounts.", + ) + secretRef: Optional[SecretRef3] = Field( + default=None, + description="secretRef is Optional: secretRef is reference to the secret object containing\nsensitive information to pass to the plugin scripts. This may be\nempty if no secret object is specified. If the secret object\ncontains more than one secret, all secrets are passed to the plugin\nscripts.", + ) + + +class Flocker(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + datasetName: Optional[str] = Field( + default=None, + description="datasetName is Name of the dataset stored as metadata -> name on the dataset for Flocker\nshould be considered as deprecated", + ) + datasetUUID: Optional[str] = Field( + default=None, + description="datasetUUID is the UUID of the dataset. This is unique identifier of a Flocker dataset", + ) + + +class GcePersistentDisk(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + fsType: Optional[str] = Field( + default=None, + description='fsType is filesystem type of the volume that you want to mount.\nTip: Ensure that the filesystem type is supported by the host operating system.\nExamples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk\nTODO: how do we prevent errors in the filesystem from compromising the machine', + ) + partition: Optional[int] = Field( + default=None, + description='partition is the partition in the volume that you want to mount.\nIf omitted, the default is to mount by volume name.\nExamples: For volume /dev/sda1, you specify the partition as "1".\nSimilarly, the volume partition for /dev/sda is "0" (or you can leave the property empty).\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk', + ) + pdName: str = Field( + ..., + description="pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", + ) + readOnly: Optional[bool] = Field( + default=None, + description="readOnly here will force the ReadOnly setting in VolumeMounts.\nDefaults to false.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", + ) + + +class GitRepo(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + directory: Optional[str] = Field( + default=None, + description="directory is the target directory name.\nMust not contain or start with '..'. If '.' is supplied, the volume directory will be the\ngit repository. Otherwise, if specified, the volume will contain the git repository in\nthe subdirectory with the given name.", + ) + repository: str = Field(..., description="repository is the URL") + revision: Optional[str] = Field( + default=None, + description="revision is the commit hash for the specified revision.", + ) + + +class Glusterfs(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + endpoints: str = Field( + ..., + description="endpoints is the endpoint name that details Glusterfs topology.\nMore info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod", + ) + path: str = Field( + ..., + description="path is the Glusterfs volume path.\nMore info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod", + ) + readOnly: Optional[bool] = Field( + default=None, + description="readOnly here will force the Glusterfs volume to be mounted with read-only permissions.\nDefaults to false.\nMore info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod", + ) + + +class HostPath(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + path: str = Field( + ..., + description="path of the directory on the host.\nIf the path is a symlink, it will follow the link to the real path.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath", + ) + type: Optional[str] = Field( + default=None, + description='type for HostPath Volume\nDefaults to ""\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath', + ) + + +class Iscsi(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + chapAuthDiscovery: Optional[bool] = Field( + default=None, + description="chapAuthDiscovery defines whether support iSCSI Discovery CHAP authentication", + ) + chapAuthSession: Optional[bool] = Field( + default=None, + description="chapAuthSession defines whether support iSCSI Session CHAP authentication", + ) + fsType: Optional[str] = Field( + default=None, + description='fsType is the filesystem type of the volume that you want to mount.\nTip: Ensure that the filesystem type is supported by the host operating system.\nExamples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi\nTODO: how do we prevent errors in the filesystem from compromising the machine', + ) + initiatorName: Optional[str] = Field( + default=None, + description="initiatorName is the custom iSCSI Initiator Name.\nIf initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface\n: will be created for the connection.", + ) + iqn: str = Field(..., description="iqn is the target iSCSI Qualified Name.") + iscsiInterface: Optional[str] = Field( + default=None, + description="iscsiInterface is the interface Name that uses an iSCSI transport.\nDefaults to 'default' (tcp).", + ) + lun: int = Field(..., description="lun represents iSCSI Target Lun number.") + portals: Optional[List[str]] = Field( + default=None, + description="portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port\nis other than default (typically TCP ports 860 and 3260).", + ) + readOnly: Optional[bool] = Field( + default=None, + description="readOnly here will force the ReadOnly setting in VolumeMounts.\nDefaults to false.", + ) + secretRef: Optional[SecretRef3] = Field( + default=None, + description="secretRef is the CHAP Secret for iSCSI target and initiator authentication", + ) + targetPortal: str = Field( + ..., + description="targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port\nis other than default (typically TCP ports 860 and 3260).", + ) + + +class Nfs(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + path: str = Field( + ..., + description="path that is exported by the NFS server.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#nfs", + ) + readOnly: Optional[bool] = Field( + default=None, + description="readOnly here will force the NFS export to be mounted with read-only permissions.\nDefaults to false.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#nfs", + ) + server: str = Field( + ..., + description="server is the hostname or IP address of the NFS server.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#nfs", + ) + + +class PersistentVolumeClaim(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + claimName: str = Field( + ..., + description="claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume.\nMore info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims", + ) + readOnly: Optional[bool] = Field( + default=None, + description="readOnly Will force the ReadOnly setting in VolumeMounts.\nDefault false.", + ) + + +class PhotonPersistentDisk(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + fsType: Optional[str] = Field( + default=None, + description='fsType is the filesystem type to mount.\nMust be a filesystem type supported by the host operating system.\nEx. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.', + ) + pdID: str = Field( + ..., + description="pdID is the ID that identifies Photon Controller persistent disk", + ) + + +class PortworxVolume(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + fsType: Optional[str] = Field( + default=None, + description='fSType represents the filesystem type to mount\nMust be a filesystem type supported by the host operating system.\nEx. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified.', + ) + readOnly: Optional[bool] = Field( + default=None, + description="readOnly defaults to false (read/write). ReadOnly here will force\nthe ReadOnly setting in VolumeMounts.", + ) + volumeID: str = Field( + ..., description="volumeID uniquely identifies a Portworx volume" + ) + + +class Item2(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + key: str = Field(..., description="key is the key to project.") + mode: Optional[int] = Field( + default=None, + description="mode is Optional: mode bits used to set permissions on this file.\nMust be an octal value between 0000 and 0777 or a decimal value between 0 and 511.\nYAML accepts both octal and decimal values, JSON requires decimal values for mode bits.\nIf not specified, the volume defaultMode will be used.\nThis might be in conflict with other options that affect the file\nmode, like fsGroup, and the result can be other mode bits set.", + ) + path: str = Field( + ..., + description="path is the relative path of the file to map the key to.\nMay not be an absolute path.\nMay not contain the path element '..'.\nMay not start with the string '..'.", + ) + + +class ConfigMap1(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + items: Optional[List[Item2]] = Field( + default=None, + description="items if unspecified, each key-value pair in the Data field of the referenced\nConfigMap will be projected into the volume as a file whose name is the\nkey and content is the value. If specified, the listed keys will be\nprojected into the specified paths, and unlisted keys will not be\npresent. If a key is specified which is not present in the ConfigMap,\nthe volume setup will error unless it is marked optional. Paths must be\nrelative and may not contain the '..' path or start with '..'.", + ) + name: Optional[str] = Field( + default=None, + description="Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\nTODO: Add other useful fields. apiVersion, kind, uid?", + ) + optional: Optional[bool] = Field( + default=None, + description="optional specify whether the ConfigMap or its keys must be defined", + ) + + +class Item3(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + fieldRef: Optional[FieldRef] = Field( + default=None, + description="Required: Selects a field of the pod: only annotations, labels, name and namespace are supported.", + ) + mode: Optional[int] = Field( + default=None, + description="Optional: mode bits used to set permissions on this file, must be an octal value\nbetween 0000 and 0777 or a decimal value between 0 and 511.\nYAML accepts both octal and decimal values, JSON requires decimal values for mode bits.\nIf not specified, the volume defaultMode will be used.\nThis might be in conflict with other options that affect the file\nmode, like fsGroup, and the result can be other mode bits set.", + ) + path: str = Field( + ..., + description="Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..'", + ) + resourceFieldRef: Optional[ResourceFieldRef] = Field( + default=None, + description="Selects a resource of the container: only resources limits and requests\n(limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.", + ) + + +class DownwardAPI1(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + items: Optional[List[Item3]] = Field( + default=None, description="Items is a list of DownwardAPIVolume file" + ) + + +class Item4(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + key: str = Field(..., description="key is the key to project.") + mode: Optional[int] = Field( + default=None, + description="mode is Optional: mode bits used to set permissions on this file.\nMust be an octal value between 0000 and 0777 or a decimal value between 0 and 511.\nYAML accepts both octal and decimal values, JSON requires decimal values for mode bits.\nIf not specified, the volume defaultMode will be used.\nThis might be in conflict with other options that affect the file\nmode, like fsGroup, and the result can be other mode bits set.", + ) + path: str = Field( + ..., + description="path is the relative path of the file to map the key to.\nMay not be an absolute path.\nMay not contain the path element '..'.\nMay not start with the string '..'.", + ) + + +class Secret(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + items: Optional[List[Item4]] = Field( + default=None, + description="items if unspecified, each key-value pair in the Data field of the referenced\nSecret will be projected into the volume as a file whose name is the\nkey and content is the value. If specified, the listed keys will be\nprojected into the specified paths, and unlisted keys will not be\npresent. If a key is specified which is not present in the Secret,\nthe volume setup will error unless it is marked optional. Paths must be\nrelative and may not contain the '..' path or start with '..'.", + ) + name: Optional[str] = Field( + default=None, + description="Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\nTODO: Add other useful fields. apiVersion, kind, uid?", + ) + optional: Optional[bool] = Field( + default=None, + description="optional field specify whether the Secret or its key must be defined", + ) + + +class ServiceAccountToken(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + audience: Optional[str] = Field( + default=None, + description="audience is the intended audience of the token. A recipient of a token\nmust identify itself with an identifier specified in the audience of the\ntoken, and otherwise should reject the token. The audience defaults to the\nidentifier of the apiserver.", + ) + expirationSeconds: Optional[int] = Field( + default=None, + description="expirationSeconds is the requested duration of validity of the service\naccount token. As the token approaches expiration, the kubelet volume\nplugin will proactively rotate the service account token. The kubelet will\nstart trying to rotate the token if the token is older than 80 percent of\nits time to live or if the token is older than 24 hours.Defaults to 1 hour\nand must be at least 10 minutes.", + ) + path: str = Field( + ..., + description="path is the path relative to the mount point of the file to project the\ntoken into.", + ) + + +class Source(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + configMap: Optional[ConfigMap1] = Field( + default=None, + description="configMap information about the configMap data to project", + ) + downwardAPI: Optional[DownwardAPI1] = Field( + default=None, + description="downwardAPI information about the downwardAPI data to project", + ) + secret: Optional[Secret] = Field( + default=None, description="secret information about the secret data to project" + ) + serviceAccountToken: Optional[ServiceAccountToken] = Field( + default=None, + description="serviceAccountToken is information about the serviceAccountToken data to project", + ) + + +class Projected(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + defaultMode: Optional[int] = Field( + default=None, + description="defaultMode are the mode bits used to set permissions on created files by default.\nMust be an octal value between 0000 and 0777 or a decimal value between 0 and 511.\nYAML accepts both octal and decimal values, JSON requires decimal values for mode bits.\nDirectories within the path are not affected by this setting.\nThis might be in conflict with other options that affect the file\nmode, like fsGroup, and the result can be other mode bits set.", + ) + sources: Optional[List[Source]] = Field( + default=None, description="sources is the list of volume projections" + ) + + +class Quobyte(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + group: Optional[str] = Field( + default=None, description="group to map volume access to\nDefault is no group" + ) + readOnly: Optional[bool] = Field( + default=None, + description="readOnly here will force the Quobyte volume to be mounted with read-only permissions.\nDefaults to false.", + ) + registry: str = Field( + ..., + description="registry represents a single or multiple Quobyte Registry services\nspecified as a string as host:port pair (multiple entries are separated with commas)\nwhich acts as the central registry for volumes", + ) + tenant: Optional[str] = Field( + default=None, + description="tenant owning the given Quobyte volume in the Backend\nUsed with dynamically provisioned Quobyte volumes, value is set by the plugin", + ) + user: Optional[str] = Field( + default=None, + description="user to map volume access to\nDefaults to serivceaccount user", + ) + volume: str = Field( + ..., + description="volume is a string that references an already created Quobyte volume by name.", + ) + + +class Rbd(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + fsType: Optional[str] = Field( + default=None, + description='fsType is the filesystem type of the volume that you want to mount.\nTip: Ensure that the filesystem type is supported by the host operating system.\nExamples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#rbd\nTODO: how do we prevent errors in the filesystem from compromising the machine', + ) + image: str = Field( + ..., + description="image is the rados image name.\nMore info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + ) + keyring: Optional[str] = Field( + default=None, + description="keyring is the path to key ring for RBDUser.\nDefault is /etc/ceph/keyring.\nMore info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + ) + monitors: List[str] = Field( + ..., + description="monitors is a collection of Ceph monitors.\nMore info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + ) + pool: Optional[str] = Field( + default=None, + description="pool is the rados pool name.\nDefault is rbd.\nMore info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + ) + readOnly: Optional[bool] = Field( + default=None, + description="readOnly here will force the ReadOnly setting in VolumeMounts.\nDefaults to false.\nMore info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + ) + secretRef: Optional[SecretRef3] = Field( + default=None, + description="secretRef is name of the authentication secret for RBDUser. If provided\noverrides keyring.\nDefault is nil.\nMore info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + ) + user: Optional[str] = Field( + default=None, + description="user is the rados user name.\nDefault is admin.\nMore info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + ) + + +class ScaleIO(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + fsType: Optional[str] = Field( + default=None, + description='fsType is the filesystem type to mount.\nMust be a filesystem type supported by the host operating system.\nEx. "ext4", "xfs", "ntfs".\nDefault is "xfs".', + ) + gateway: str = Field( + ..., description="gateway is the host address of the ScaleIO API Gateway." + ) + protectionDomain: Optional[str] = Field( + default=None, + description="protectionDomain is the name of the ScaleIO Protection Domain for the configured storage.", + ) + readOnly: Optional[bool] = Field( + default=None, + description="readOnly Defaults to false (read/write). ReadOnly here will force\nthe ReadOnly setting in VolumeMounts.", + ) + secretRef: SecretRef3 = Field( + ..., + description="secretRef references to the secret for ScaleIO user and other\nsensitive information. If this is not provided, Login operation will fail.", + ) + sslEnabled: Optional[bool] = Field( + default=None, + description="sslEnabled Flag enable/disable SSL communication with Gateway, default false", + ) + storageMode: Optional[str] = Field( + default=None, + description="storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned.\nDefault is ThinProvisioned.", + ) + storagePool: Optional[str] = Field( + default=None, + description="storagePool is the ScaleIO Storage Pool associated with the protection domain.", + ) + system: str = Field( + ..., + description="system is the name of the storage system as configured in ScaleIO.", + ) + volumeName: Optional[str] = Field( + default=None, + description="volumeName is the name of a volume already created in the ScaleIO system\nthat is associated with this volume source.", + ) + + +class Secret1(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + defaultMode: Optional[int] = Field( + default=None, + description="defaultMode is Optional: mode bits used to set permissions on created files by default.\nMust be an octal value between 0000 and 0777 or a decimal value between 0 and 511.\nYAML accepts both octal and decimal values, JSON requires decimal values\nfor mode bits. Defaults to 0644.\nDirectories within the path are not affected by this setting.\nThis might be in conflict with other options that affect the file\nmode, like fsGroup, and the result can be other mode bits set.", + ) + items: Optional[List[Item4]] = Field( + default=None, + description="items If unspecified, each key-value pair in the Data field of the referenced\nSecret will be projected into the volume as a file whose name is the\nkey and content is the value. If specified, the listed keys will be\nprojected into the specified paths, and unlisted keys will not be\npresent. If a key is specified which is not present in the Secret,\nthe volume setup will error unless it is marked optional. Paths must be\nrelative and may not contain the '..' path or start with '..'.", + ) + optional: Optional[bool] = Field( + default=None, + description="optional field specify whether the Secret or its keys must be defined", + ) + secretName: Optional[str] = Field( + default=None, + description="secretName is the name of the secret in the pod's namespace to use.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#secret", + ) + + +class Storageos(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + fsType: Optional[str] = Field( + default=None, + description='fsType is the filesystem type to mount.\nMust be a filesystem type supported by the host operating system.\nEx. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.', + ) + readOnly: Optional[bool] = Field( + default=None, + description="readOnly defaults to false (read/write). ReadOnly here will force\nthe ReadOnly setting in VolumeMounts.", + ) + secretRef: Optional[SecretRef3] = Field( + default=None, + description="secretRef specifies the secret to use for obtaining the StorageOS API\ncredentials. If not specified, default values will be attempted.", + ) + volumeName: Optional[str] = Field( + default=None, + description="volumeName is the human-readable name of the StorageOS volume. Volume\nnames are only unique within a namespace.", + ) + volumeNamespace: Optional[str] = Field( + default=None, + description='volumeNamespace specifies the scope of the volume within StorageOS. If no\nnamespace is specified then the Pod\'s namespace will be used. This allows the\nKubernetes name scoping to be mirrored within StorageOS for tighter integration.\nSet VolumeName to any name to override the default behaviour.\nSet to "default" if you are not using namespaces within StorageOS.\nNamespaces that do not pre-exist within StorageOS will be created.', + ) + + +class VsphereVolume(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + fsType: Optional[str] = Field( + default=None, + description='fsType is filesystem type to mount.\nMust be a filesystem type supported by the host operating system.\nEx. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.', + ) + storagePolicyID: Optional[str] = Field( + default=None, + description="storagePolicyID is the storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName.", + ) + storagePolicyName: Optional[str] = Field( + default=None, + description="storagePolicyName is the storage Policy Based Management (SPBM) profile name.", + ) + volumePath: str = Field( + ..., description="volumePath is the path that identifies vSphere volume vmdk" + ) + + +class ExtraVolume(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + awsElasticBlockStore: Optional[AwsElasticBlockStore] = Field( + default=None, + description="awsElasticBlockStore represents an AWS Disk resource that is attached to a\nkubelet's host machine and then exposed to the pod.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore", + ) + azureDisk: Optional[AzureDisk] = Field( + default=None, + description="azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.", + ) + azureFile: Optional[AzureFile] = Field( + default=None, + description="azureFile represents an Azure File Service mount on the host and bind mount to the pod.", + ) + cephfs: Optional[Cephfs] = Field( + default=None, + description="cephFS represents a Ceph FS mount on the host that shares a pod's lifetime", + ) + cinder: Optional[Cinder] = Field( + default=None, + description="cinder represents a cinder volume attached and mounted on kubelets host machine.\nMore info: https://examples.k8s.io/mysql-cinder-pd/README.md", + ) + configMap: Optional[ConfigMap] = Field( + default=None, + description="configMap represents a configMap that should populate this volume", + ) + csi: Optional[Csi] = Field( + default=None, + description="csi (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers (Beta feature).", + ) + downwardAPI: Optional[DownwardAPI] = Field( + default=None, + description="downwardAPI represents downward API about the pod that should populate this volume", + ) + emptyDir: Optional[EmptyDir] = Field( + default=None, + description="emptyDir represents a temporary directory that shares a pod's lifetime.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir", + ) + ephemeral: Optional[Ephemeral] = Field( + default=None, + description="ephemeral represents a volume that is handled by a cluster storage driver.\nThe volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts,\nand deleted when the pod is removed.\n\n\nUse this if:\na) the volume is only needed while the pod runs,\nb) features of normal volumes like restoring from snapshot or capacity\n tracking are needed,\nc) the storage driver is specified through a storage class, and\nd) the storage driver supports dynamic volume provisioning through\n a PersistentVolumeClaim (see EphemeralVolumeSource for more\n information on the connection between this volume type\n and PersistentVolumeClaim).\n\n\nUse PersistentVolumeClaim or one of the vendor-specific\nAPIs for volumes that persist for longer than the lifecycle\nof an individual pod.\n\n\nUse CSI for light-weight local ephemeral volumes if the CSI driver is meant to\nbe used that way - see the documentation of the driver for\nmore information.\n\n\nA pod can use both types of ephemeral volumes and\npersistent volumes at the same time.", + ) + fc: Optional[Fc] = Field( + default=None, + description="fc represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.", + ) + flexVolume: Optional[FlexVolume] = Field( + default=None, + description="flexVolume represents a generic volume resource that is\nprovisioned/attached using an exec based plugin.", + ) + flocker: Optional[Flocker] = Field( + default=None, + description="flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running", + ) + gcePersistentDisk: Optional[GcePersistentDisk] = Field( + default=None, + description="gcePersistentDisk represents a GCE Disk resource that is attached to a\nkubelet's host machine and then exposed to the pod.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", + ) + gitRepo: Optional[GitRepo] = Field( + default=None, + description="gitRepo represents a git repository at a particular revision.\nDEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an\nEmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir\ninto the Pod's container.", + ) + glusterfs: Optional[Glusterfs] = Field( + default=None, + description="glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime.\nMore info: https://examples.k8s.io/volumes/glusterfs/README.md", + ) + hostPath: Optional[HostPath] = Field( + default=None, + description="hostPath represents a pre-existing file or directory on the host\nmachine that is directly exposed to the container. This is generally\nused for system agents or other privileged things that are allowed\nto see the host machine. Most containers will NOT need this.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath\n---\nTODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not\nmount host directories as read/write.", + ) + iscsi: Optional[Iscsi] = Field( + default=None, + description="iscsi represents an ISCSI Disk resource that is attached to a\nkubelet's host machine and then exposed to the pod.\nMore info: https://examples.k8s.io/volumes/iscsi/README.md", + ) + name: str = Field( + ..., + description="name of the volume.\nMust be a DNS_LABEL and unique within the pod.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + ) + nfs: Optional[Nfs] = Field( + default=None, + description="nfs represents an NFS mount on the host that shares a pod's lifetime\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#nfs", + ) + persistentVolumeClaim: Optional[PersistentVolumeClaim] = Field( + default=None, + description="persistentVolumeClaimVolumeSource represents a reference to a\nPersistentVolumeClaim in the same namespace.\nMore info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims", + ) + photonPersistentDisk: Optional[PhotonPersistentDisk] = Field( + default=None, + description="photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine", + ) + portworxVolume: Optional[PortworxVolume] = Field( + default=None, + description="portworxVolume represents a portworx volume attached and mounted on kubelets host machine", + ) + projected: Optional[Projected] = Field( + default=None, + description="projected items for all in one resources secrets, configmaps, and downward API", + ) + quobyte: Optional[Quobyte] = Field( + default=None, + description="quobyte represents a Quobyte mount on the host that shares a pod's lifetime", + ) + rbd: Optional[Rbd] = Field( + default=None, + description="rbd represents a Rados Block Device mount on the host that shares a pod's lifetime.\nMore info: https://examples.k8s.io/volumes/rbd/README.md", + ) + scaleIO: Optional[ScaleIO] = Field( + default=None, + description="scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.", + ) + secret: Optional[Secret1] = Field( + default=None, + description="secret represents a secret that should populate this volume.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#secret", + ) + storageos: Optional[Storageos] = Field( + default=None, + description="storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.", + ) + vsphereVolume: Optional[VsphereVolume] = Field( + default=None, + description="vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine", + ) + + +class TlsSecret(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + adopt: Optional[bool] = Field( + default=None, + description="If the secret is adopted then the operator will delete the secret when the custom resource that uses it is deleted.", + ) + name: str + + +class Ingress(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + annotations: Optional[Dict[str, str]] = None + host: str + ingressClassName: Optional[str] = None + tlsSecret: Optional[TlsSecret] = Field( + default=None, + description="The name of the TLS secret, same as what is specified in a regular Kubernetes Ingress.", + ) + + +class ValueFrom1(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + configMapKeyRef: Optional[ConfigMapKeyRef] = Field( + default=None, description="Selects a key of a ConfigMap." + ) + fieldRef: Optional[FieldRef] = Field( + default=None, + description="Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`,\nspec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.", + ) + resourceFieldRef: Optional[ResourceFieldRef] = Field( + default=None, + description="Selects a resource of the container: only resources limits and requests\n(limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.", + ) + secretKeyRef: Optional[SecretKeyRef] = Field( + default=None, description="Selects a key of a secret in the pod's namespace" + ) + + +class EnvItem1(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + name: str = Field( + ..., description="Name of the environment variable. Must be a C_IDENTIFIER." + ) + value: Optional[str] = Field( + default=None, + description='Variable references $(VAR_NAME) are expanded\nusing the previously defined environment variables in the container and\nany service environment variables. If a variable cannot be resolved,\nthe reference in the input string will be unchanged. Double $$ are reduced\nto a single $, which allows for escaping the $(VAR_NAME) syntax: i.e.\n"$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)".\nEscaped references will never be expanded, regardless of whether the variable\nexists or not.\nDefaults to "".', + ) + valueFrom: Optional[ValueFrom1] = Field( + default=None, + description="Source for the environment variable's value. Cannot be used if value is not empty.", + ) + + +class SecretRef10(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + name: Optional[str] = Field( + default=None, + description="Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\nTODO: Add other useful fields. apiVersion, kind, uid?", + ) + optional: Optional[bool] = Field( + default=None, description="Specify whether the Secret must be defined" + ) + + +class EnvFromItem1(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + configMapRef: Optional[ConfigMapRef] = Field( + default=None, description="The ConfigMap to select from" + ) + prefix: Optional[str] = Field( + default=None, + description="An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER.", + ) + secretRef: Optional[SecretRef10] = Field( + default=None, description="The Secret to select from" + ) + + +class HttpGet5(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + host: Optional[str] = Field( + default=None, + description='Host name to connect to, defaults to the pod IP. You probably want to set\n"Host" in httpHeaders instead.', + ) + httpHeaders: Optional[List[HttpHeader]] = Field( + default=None, + description="Custom headers to set in the request. HTTP allows repeated headers.", + ) + path: Optional[str] = Field( + default=None, description="Path to access on the HTTP server." + ) + port: Union[int, str] = Field( + ..., + description="Name or number of the port to access on the container.\nNumber must be in the range 1 to 65535.\nName must be an IANA_SVC_NAME.", + ) + scheme: Optional[str] = Field( + default=None, + description="Scheme to use for connecting to the host.\nDefaults to HTTP.", + ) + + +class PostStart1(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + exec: Optional[Exec] = Field( + default=None, description="Exec specifies the action to take." + ) + httpGet: Optional[HttpGet5] = Field( + default=None, description="HTTPGet specifies the http request to perform." + ) + tcpSocket: Optional[TcpSocket] = Field( + default=None, + description="Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept\nfor the backward compatibility. There are no validation of this field and\nlifecycle hooks will fail in runtime when tcp handler is specified.", + ) + + +class HttpGet6(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + host: Optional[str] = Field( + default=None, + description='Host name to connect to, defaults to the pod IP. You probably want to set\n"Host" in httpHeaders instead.', + ) + httpHeaders: Optional[List[HttpHeader]] = Field( + default=None, + description="Custom headers to set in the request. HTTP allows repeated headers.", + ) + path: Optional[str] = Field( + default=None, description="Path to access on the HTTP server." + ) + port: Union[int, str] = Field( + ..., + description="Name or number of the port to access on the container.\nNumber must be in the range 1 to 65535.\nName must be an IANA_SVC_NAME.", + ) + scheme: Optional[str] = Field( + default=None, + description="Scheme to use for connecting to the host.\nDefaults to HTTP.", + ) + + +class PreStop1(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + exec: Optional[Exec] = Field( + default=None, description="Exec specifies the action to take." + ) + httpGet: Optional[HttpGet6] = Field( + default=None, description="HTTPGet specifies the http request to perform." + ) + tcpSocket: Optional[TcpSocket] = Field( + default=None, + description="Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept\nfor the backward compatibility. There are no validation of this field and\nlifecycle hooks will fail in runtime when tcp handler is specified.", + ) + + +class Lifecycle1(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + postStart: Optional[PostStart1] = Field( + default=None, + description="PostStart is called immediately after a container is created. If the handler fails,\nthe container is terminated and restarted according to its restart policy.\nOther management of the container blocks until the hook completes.\nMore info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks", + ) + preStop: Optional[PreStop1] = Field( + default=None, + description="PreStop is called immediately before a container is terminated due to an\nAPI request or management event such as liveness/startup probe failure,\npreemption, resource contention, etc. The handler is not called if the\ncontainer crashes or exits. The Pod's termination grace period countdown begins before the\nPreStop hook is executed. Regardless of the outcome of the handler, the\ncontainer will eventually terminate within the Pod's termination grace\nperiod (unless delayed by finalizers). Other management of the container blocks until the hook completes\nor until the termination grace period is reached.\nMore info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks", + ) + + +class HttpGet7(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + host: Optional[str] = Field( + default=None, + description='Host name to connect to, defaults to the pod IP. You probably want to set\n"Host" in httpHeaders instead.', + ) + httpHeaders: Optional[List[HttpHeader]] = Field( + default=None, + description="Custom headers to set in the request. HTTP allows repeated headers.", + ) + path: Optional[str] = Field( + default=None, description="Path to access on the HTTP server." + ) + port: Union[int, str] = Field( + ..., + description="Name or number of the port to access on the container.\nNumber must be in the range 1 to 65535.\nName must be an IANA_SVC_NAME.", + ) + scheme: Optional[str] = Field( + default=None, + description="Scheme to use for connecting to the host.\nDefaults to HTTP.", + ) + + +class LivenessProbe1(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + exec: Optional[Exec] = Field( + default=None, description="Exec specifies the action to take." + ) + failureThreshold: Optional[int] = Field( + default=None, + description="Minimum consecutive failures for the probe to be considered failed after having succeeded.\nDefaults to 3. Minimum value is 1.", + ) + grpc: Optional[Grpc] = Field( + default=None, description="GRPC specifies an action involving a GRPC port." + ) + httpGet: Optional[HttpGet7] = Field( + default=None, description="HTTPGet specifies the http request to perform." + ) + initialDelaySeconds: Optional[int] = Field( + default=None, + description="Number of seconds after the container has started before liveness probes are initiated.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + ) + periodSeconds: Optional[int] = Field( + default=None, + description="How often (in seconds) to perform the probe.\nDefault to 10 seconds. Minimum value is 1.", + ) + successThreshold: Optional[int] = Field( + default=None, + description="Minimum consecutive successes for the probe to be considered successful after having failed.\nDefaults to 1. Must be 1 for liveness and startup. Minimum value is 1.", + ) + tcpSocket: Optional[TcpSocket] = Field( + default=None, description="TCPSocket specifies an action involving a TCP port." + ) + terminationGracePeriodSeconds: Optional[int] = Field( + default=None, + description="Optional duration in seconds the pod needs to terminate gracefully upon probe failure.\nThe grace period is the duration in seconds after the processes running in the pod are sent\na termination signal and the time when the processes are forcibly halted with a kill signal.\nSet this value longer than the expected cleanup time for your process.\nIf this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this\nvalue overrides the value provided by the pod spec.\nValue must be non-negative integer. The value zero indicates stop immediately via\nthe kill signal (no opportunity to shut down).\nThis is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.\nMinimum value is 1. spec.terminationGracePeriodSeconds is used if unset.", + ) + timeoutSeconds: Optional[int] = Field( + default=None, + description="Number of seconds after which the probe times out.\nDefaults to 1 second. Minimum value is 1.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + ) + + +class HttpGet8(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + host: Optional[str] = Field( + default=None, + description='Host name to connect to, defaults to the pod IP. You probably want to set\n"Host" in httpHeaders instead.', + ) + httpHeaders: Optional[List[HttpHeader]] = Field( + default=None, + description="Custom headers to set in the request. HTTP allows repeated headers.", + ) + path: Optional[str] = Field( + default=None, description="Path to access on the HTTP server." + ) + port: Union[int, str] = Field( + ..., + description="Name or number of the port to access on the container.\nNumber must be in the range 1 to 65535.\nName must be an IANA_SVC_NAME.", + ) + scheme: Optional[str] = Field( + default=None, + description="Scheme to use for connecting to the host.\nDefaults to HTTP.", + ) + + +class ReadinessProbe1(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + exec: Optional[Exec] = Field( + default=None, description="Exec specifies the action to take." + ) + failureThreshold: Optional[int] = Field( + default=None, + description="Minimum consecutive failures for the probe to be considered failed after having succeeded.\nDefaults to 3. Minimum value is 1.", + ) + grpc: Optional[Grpc] = Field( + default=None, description="GRPC specifies an action involving a GRPC port." + ) + httpGet: Optional[HttpGet8] = Field( + default=None, description="HTTPGet specifies the http request to perform." + ) + initialDelaySeconds: Optional[int] = Field( + default=None, + description="Number of seconds after the container has started before liveness probes are initiated.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + ) + periodSeconds: Optional[int] = Field( + default=None, + description="How often (in seconds) to perform the probe.\nDefault to 10 seconds. Minimum value is 1.", + ) + successThreshold: Optional[int] = Field( + default=None, + description="Minimum consecutive successes for the probe to be considered successful after having failed.\nDefaults to 1. Must be 1 for liveness and startup. Minimum value is 1.", + ) + tcpSocket: Optional[TcpSocket] = Field( + default=None, description="TCPSocket specifies an action involving a TCP port." + ) + terminationGracePeriodSeconds: Optional[int] = Field( + default=None, + description="Optional duration in seconds the pod needs to terminate gracefully upon probe failure.\nThe grace period is the duration in seconds after the processes running in the pod are sent\na termination signal and the time when the processes are forcibly halted with a kill signal.\nSet this value longer than the expected cleanup time for your process.\nIf this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this\nvalue overrides the value provided by the pod spec.\nValue must be non-negative integer. The value zero indicates stop immediately via\nthe kill signal (no opportunity to shut down).\nThis is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.\nMinimum value is 1. spec.terminationGracePeriodSeconds is used if unset.", + ) + timeoutSeconds: Optional[int] = Field( + default=None, + description="Number of seconds after which the probe times out.\nDefaults to 1 second. Minimum value is 1.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + ) + + +class Resources2(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + claims: Optional[List[Claim]] = Field( + default=None, + description="Claims lists the names of resources, defined in spec.resourceClaims,\nthat are used by this container.\n\n\nThis is an alpha field and requires enabling the\nDynamicResourceAllocation feature gate.\n\n\nThis field is immutable. It can only be set for containers.", + ) + limits: Optional[Dict[str, Union[int, str]]] = Field( + default=None, + description="Limits describes the maximum amount of compute resources allowed.\nMore info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", + ) + requests: Optional[Dict[str, Union[int, str]]] = Field( + default=None, + description="Requests describes the minimum amount of compute resources required.\nIf Requests is omitted for a container, it defaults to Limits if that is explicitly specified,\notherwise to an implementation-defined value. Requests cannot exceed Limits.\nMore info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", + ) + + +class SecurityContext1(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + allowPrivilegeEscalation: Optional[bool] = Field( + default=None, + description="AllowPrivilegeEscalation controls whether a process can gain more\nprivileges than its parent process. This bool directly controls if\nthe no_new_privs flag will be set on the container process.\nAllowPrivilegeEscalation is true always when the container is:\n1) run as Privileged\n2) has CAP_SYS_ADMIN\nNote that this field cannot be set when spec.os.name is windows.", + ) + capabilities: Optional[Capabilities] = Field( + default=None, + description="The capabilities to add/drop when running containers.\nDefaults to the default set of capabilities granted by the container runtime.\nNote that this field cannot be set when spec.os.name is windows.", + ) + privileged: Optional[bool] = Field( + default=None, + description="Run container in privileged mode.\nProcesses in privileged containers are essentially equivalent to root on the host.\nDefaults to false.\nNote that this field cannot be set when spec.os.name is windows.", + ) + procMount: Optional[str] = Field( + default=None, + description="procMount denotes the type of proc mount to use for the containers.\nThe default is DefaultProcMount which uses the container runtime defaults for\nreadonly paths and masked paths.\nThis requires the ProcMountType feature flag to be enabled.\nNote that this field cannot be set when spec.os.name is windows.", + ) + readOnlyRootFilesystem: Optional[bool] = Field( + default=None, + description="Whether this container has a read-only root filesystem.\nDefault is false.\nNote that this field cannot be set when spec.os.name is windows.", + ) + runAsGroup: Optional[int] = Field( + default=None, + description="The GID to run the entrypoint of the container process.\nUses runtime default if unset.\nMay also be set in PodSecurityContext. If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence.\nNote that this field cannot be set when spec.os.name is windows.", + ) + runAsNonRoot: Optional[bool] = Field( + default=None, + description="Indicates that the container must run as a non-root user.\nIf true, the Kubelet will validate the image at runtime to ensure that it\ndoes not run as UID 0 (root) and fail to start the container if it does.\nIf unset or false, no such validation will be performed.\nMay also be set in PodSecurityContext. If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence.", + ) + runAsUser: Optional[int] = Field( + default=None, + description="The UID to run the entrypoint of the container process.\nDefaults to user specified in image metadata if unspecified.\nMay also be set in PodSecurityContext. If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence.\nNote that this field cannot be set when spec.os.name is windows.", + ) + seLinuxOptions: Optional[SeLinuxOptions] = Field( + default=None, + description="The SELinux context to be applied to the container.\nIf unspecified, the container runtime will allocate a random SELinux context for each\ncontainer. May also be set in PodSecurityContext. If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence.\nNote that this field cannot be set when spec.os.name is windows.", + ) + seccompProfile: Optional[SeccompProfile] = Field( + default=None, + description="The seccomp options to use by this container. If seccomp options are\nprovided at both the pod & container level, the container options\noverride the pod options.\nNote that this field cannot be set when spec.os.name is windows.", + ) + windowsOptions: Optional[WindowsOptions] = Field( + default=None, + description="The Windows specific settings applied to all containers.\nIf unspecified, the options from the PodSecurityContext will be used.\nIf set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.\nNote that this field cannot be set when spec.os.name is linux.", + ) + + +class HttpGet9(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + host: Optional[str] = Field( + default=None, + description='Host name to connect to, defaults to the pod IP. You probably want to set\n"Host" in httpHeaders instead.', + ) + httpHeaders: Optional[List[HttpHeader]] = Field( + default=None, + description="Custom headers to set in the request. HTTP allows repeated headers.", + ) + path: Optional[str] = Field( + default=None, description="Path to access on the HTTP server." + ) + port: Union[int, str] = Field( + ..., + description="Name or number of the port to access on the container.\nNumber must be in the range 1 to 65535.\nName must be an IANA_SVC_NAME.", + ) + scheme: Optional[str] = Field( + default=None, + description="Scheme to use for connecting to the host.\nDefaults to HTTP.", + ) + + +class StartupProbe1(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + exec: Optional[Exec] = Field( + default=None, description="Exec specifies the action to take." + ) + failureThreshold: Optional[int] = Field( + default=None, + description="Minimum consecutive failures for the probe to be considered failed after having succeeded.\nDefaults to 3. Minimum value is 1.", + ) + grpc: Optional[Grpc] = Field( + default=None, description="GRPC specifies an action involving a GRPC port." + ) + httpGet: Optional[HttpGet9] = Field( + default=None, description="HTTPGet specifies the http request to perform." + ) + initialDelaySeconds: Optional[int] = Field( + default=None, + description="Number of seconds after the container has started before liveness probes are initiated.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + ) + periodSeconds: Optional[int] = Field( + default=None, + description="How often (in seconds) to perform the probe.\nDefault to 10 seconds. Minimum value is 1.", + ) + successThreshold: Optional[int] = Field( + default=None, + description="Minimum consecutive successes for the probe to be considered successful after having failed.\nDefaults to 1. Must be 1 for liveness and startup. Minimum value is 1.", + ) + tcpSocket: Optional[TcpSocket] = Field( + default=None, description="TCPSocket specifies an action involving a TCP port." + ) + terminationGracePeriodSeconds: Optional[int] = Field( + default=None, + description="Optional duration in seconds the pod needs to terminate gracefully upon probe failure.\nThe grace period is the duration in seconds after the processes running in the pod are sent\na termination signal and the time when the processes are forcibly halted with a kill signal.\nSet this value longer than the expected cleanup time for your process.\nIf this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this\nvalue overrides the value provided by the pod spec.\nValue must be non-negative integer. The value zero indicates stop immediately via\nthe kill signal (no opportunity to shut down).\nThis is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.\nMinimum value is 1. spec.terminationGracePeriodSeconds is used if unset.", + ) + timeoutSeconds: Optional[int] = Field( + default=None, + description="Number of seconds after which the probe times out.\nDefaults to 1 second. Minimum value is 1.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + ) + + +class InitContainer(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + args: Optional[List[str]] = Field( + default=None, + description='Arguments to the entrypoint.\nThe container image\'s CMD is used if this is not provided.\nVariable references $(VAR_NAME) are expanded using the container\'s environment. If a variable\ncannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced\nto a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will\nproduce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless\nof whether the variable exists or not. Cannot be updated.\nMore info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell', + ) + command: Optional[List[str]] = Field( + default=None, + description='Entrypoint array. Not executed within a shell.\nThe container image\'s ENTRYPOINT is used if this is not provided.\nVariable references $(VAR_NAME) are expanded using the container\'s environment. If a variable\ncannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced\nto a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will\nproduce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless\nof whether the variable exists or not. Cannot be updated.\nMore info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell', + ) + env: Optional[List[EnvItem1]] = Field( + default=None, + description="List of environment variables to set in the container.\nCannot be updated.", + ) + envFrom: Optional[List[EnvFromItem1]] = Field( + default=None, + description="List of sources to populate environment variables in the container.\nThe keys defined within a source must be a C_IDENTIFIER. All invalid keys\nwill be reported as an event when the container is starting. When a key exists in multiple\nsources, the value associated with the last source will take precedence.\nValues defined by an Env with a duplicate key will take precedence.\nCannot be updated.", + ) + image: Optional[str] = Field( + default=None, + description="Container image name.\nMore info: https://kubernetes.io/docs/concepts/containers/images\nThis field is optional to allow higher level config management to default or override\ncontainer images in workload controllers like Deployments and StatefulSets.", + ) + imagePullPolicy: Optional[str] = Field( + default=None, + description="Image pull policy.\nOne of Always, Never, IfNotPresent.\nDefaults to Always if :latest tag is specified, or IfNotPresent otherwise.\nCannot be updated.\nMore info: https://kubernetes.io/docs/concepts/containers/images#updating-images", + ) + lifecycle: Optional[Lifecycle1] = Field( + default=None, + description="Actions that the management system should take in response to container lifecycle events.\nCannot be updated.", + ) + livenessProbe: Optional[LivenessProbe1] = Field( + default=None, + description="Periodic probe of container liveness.\nContainer will be restarted if the probe fails.\nCannot be updated.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + ) + name: str = Field( + ..., + description="Name of the container specified as a DNS_LABEL.\nEach container in a pod must have a unique name (DNS_LABEL).\nCannot be updated.", + ) + ports: Optional[List[Port]] = Field( + default=None, + description='List of ports to expose from the container. Not specifying a port here\nDOES NOT prevent that port from being exposed. Any port which is\nlistening on the default "0.0.0.0" address inside a container will be\naccessible from the network.\nModifying this array with strategic merge patch may corrupt the data.\nFor more information See https://github.com/kubernetes/kubernetes/issues/108255.\nCannot be updated.', + ) + readinessProbe: Optional[ReadinessProbe1] = Field( + default=None, + description="Periodic probe of container service readiness.\nContainer will be removed from service endpoints if the probe fails.\nCannot be updated.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + ) + resizePolicy: Optional[List[ResizePolicyItem]] = Field( + default=None, description="Resources resize policy for the container." + ) + resources: Optional[Resources2] = Field( + default=None, + description="Compute Resources required by this container.\nCannot be updated.\nMore info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", + ) + restartPolicy: Optional[str] = Field( + default=None, + description='RestartPolicy defines the restart behavior of individual containers in a pod.\nThis field may only be set for init containers, and the only allowed value is "Always".\nFor non-init containers or when this field is not specified,\nthe restart behavior is defined by the Pod\'s restart policy and the container type.\nSetting the RestartPolicy as "Always" for the init container will have the following effect:\nthis init container will be continually restarted on\nexit until all regular containers have terminated. Once all regular\ncontainers have completed, all init containers with restartPolicy "Always"\nwill be shut down. This lifecycle differs from normal init containers and\nis often referred to as a "sidecar" container. Although this init\ncontainer still starts in the init container sequence, it does not wait\nfor the container to complete before proceeding to the next init\ncontainer. Instead, the next init container starts immediately after this\ninit container is started, or after any startupProbe has successfully\ncompleted.', + ) + securityContext: Optional[SecurityContext1] = Field( + default=None, + description="SecurityContext defines the security options the container should be run with.\nIf set, the fields of SecurityContext override the equivalent fields of PodSecurityContext.\nMore info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/", + ) + startupProbe: Optional[StartupProbe1] = Field( + default=None, + description="StartupProbe indicates that the Pod has successfully initialized.\nIf specified, no other probes are executed until this completes successfully.\nIf this probe fails, the Pod will be restarted, just as if the livenessProbe failed.\nThis can be used to provide different probe parameters at the beginning of a Pod's lifecycle,\nwhen it might take a long time to load data or warm a cache, than during steady-state operation.\nThis cannot be updated.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + ) + stdin: Optional[bool] = Field( + default=None, + description="Whether this container should allocate a buffer for stdin in the container runtime. If this\nis not set, reads from stdin in the container will always result in EOF.\nDefault is false.", + ) + stdinOnce: Optional[bool] = Field( + default=None, + description="Whether the container runtime should close the stdin channel after it has been opened by\na single attach. When stdin is true the stdin stream will remain open across multiple attach\nsessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the\nfirst client attaches to stdin, and then remains open and accepts data until the client disconnects,\nat which time stdin is closed and remains closed until the container is restarted. If this\nflag is false, a container processes that reads from stdin will never receive an EOF.\nDefault is false", + ) + terminationMessagePath: Optional[str] = Field( + default=None, + description="Optional: Path at which the file to which the container's termination message\nwill be written is mounted into the container's filesystem.\nMessage written is intended to be brief final status, such as an assertion failure message.\nWill be truncated by the node if greater than 4096 bytes. The total message length across\nall containers will be limited to 12kb.\nDefaults to /dev/termination-log.\nCannot be updated.", + ) + terminationMessagePolicy: Optional[str] = Field( + default=None, + description="Indicate how the termination message should be populated. File will use the contents of\nterminationMessagePath to populate the container status message on both success and failure.\nFallbackToLogsOnError will use the last chunk of container log output if the termination\nmessage file is empty and the container exited with an error.\nThe log output is limited to 2048 bytes or 80 lines, whichever is smaller.\nDefaults to File.\nCannot be updated.", + ) + tty: Optional[bool] = Field( + default=None, + description="Whether this container should allocate a TTY for itself, also requires 'stdin' to be true.\nDefault is false.", + ) + volumeDevices: Optional[List[VolumeDevice]] = Field( + default=None, + description="volumeDevices is the list of block devices to be used by the container.", + ) + volumeMounts: Optional[List[VolumeMount]] = Field( + default=None, + description="Pod volumes to mount into the container's filesystem.\nCannot be updated.", + ) + workingDir: Optional[str] = Field( + default=None, + description="Container's working directory.\nIf not specified, the container runtime's default will be used, which\nmight be configured in the container image.\nCannot be updated.", + ) + + +class ValueFrom2(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + configMapKeyRef: Optional[ConfigMapKeyRef] = Field( + default=None, description="Selects a key of a ConfigMap." + ) + fieldRef: Optional[FieldRef] = Field( + default=None, + description="Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`,\nspec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.", + ) + resourceFieldRef: Optional[ResourceFieldRef] = Field( + default=None, + description="Selects a resource of the container: only resources limits and requests\n(limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.", + ) + secretKeyRef: Optional[SecretKeyRef] = Field( + default=None, description="Selects a key of a secret in the pod's namespace" + ) + + +class EnvItem2(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + name: str = Field( + ..., description="Name of the environment variable. Must be a C_IDENTIFIER." + ) + value: Optional[str] = Field( + default=None, + description='Variable references $(VAR_NAME) are expanded\nusing the previously defined environment variables in the container and\nany service environment variables. If a variable cannot be resolved,\nthe reference in the input string will be unchanged. Double $$ are reduced\nto a single $, which allows for escaping the $(VAR_NAME) syntax: i.e.\n"$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)".\nEscaped references will never be expanded, regardless of whether the variable\nexists or not.\nDefaults to "".', + ) + valueFrom: Optional[ValueFrom2] = Field( + default=None, + description="Source for the environment variable's value. Cannot be used if value is not empty.", + ) + + +class Resources3(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + claims: Optional[List[Claim]] = Field( + default=None, + description="Claims lists the names of resources, defined in spec.resourceClaims,\nthat are used by this container.\n\n\nThis is an alpha field and requires enabling the\nDynamicResourceAllocation feature gate.\n\n\nThis field is immutable. It can only be set for containers.", + ) + limits: Optional[Dict[str, Union[int, str]]] = Field( + default=None, + description="Limits describes the maximum amount of compute resources allowed.\nMore info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", + ) + requests: Optional[Dict[str, Union[int, str]]] = Field( + default=None, + description="Requests describes the minimum amount of compute resources required.\nIf Requests is omitted for a container, it defaults to Limits if that is explicitly specified,\notherwise to an implementation-defined value. Requests cannot exceed Limits.\nMore info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", + ) + + +class Storage(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + className: Optional[str] = None + mountPath: str = Field( + default="/workspace", + description="The absolute mount path for the session volume", + ) + size: Union[int, str] = "1Gi" + + +class Session(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + args: Optional[List[str]] = Field( + default=None, + description="The arguments to run in the session container, if omitted it will use the Docker image CMD", + ) + command: Optional[List[str]] = Field( + default=None, + description="The command to run in the session container, if omitted it will use the Docker image ENTRYPOINT", + ) + env: Optional[List[EnvItem2]] = None + extraVolumeMounts: Optional[List[ExtraVolumeMount]] = Field( + default=None, description="Additional volume mounts for the session container" + ) + image: str + port: int = Field( + ..., + description="The TCP port on the pod where the session can be accessed.\nIf the session has authentication enabled then the ingress and service will point to the authentication container\nand the authentication proxy container will proxy to this port. If authentication is disabled then the ingress and service\nroute directly to this port. Note that renku reserves the highest TCP value 65535 to run the authentication proxy.", + gt=0, + lt=65535, + ) + resources: Optional[Resources3] = Field( + default=None, + description="Resource requirements and limits in the same format as a Pod in Kubernetes", + ) + runAsGroup: int = Field( + default=1000, + description="The group is set on the session and this value is also set as the fsgroup for the whole pod and all session\ncontianers.", + ge=0, + ) + runAsUser: int = Field(default=1000, ge=0) + shmSize: Optional[Union[int, str]] = Field( + default=None, description="Size of /dev/shm" + ) + storage: Storage = {} + urlPath: str = Field( + default="/", + description="The path where the session can be accessed. If an ingress is enabled then this will be\nthe path prefix for the ingress.", + ) + workingDir: Optional[str] = Field( + default=None, + description="The abolute path for the working directory of the session container, if omitted it will use the image\nworking directory.", + ) + + +class Spec(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + authentication: Optional[Authentication] = Field( + default=None, description="Authentication configuration for the session" + ) + codeRepositories: Optional[List[CodeRepository]] = Field( + default=None, + description="A list of code repositories and associated configuration that will be cloned in the session", + ) + culling: Optional[Culling] = Field( + default=None, description="Culling configuration" + ) + dataSources: Optional[List[DataSource]] = Field( + default=None, + description="A list of data sources that should be added to the session", + ) + extraContainers: Optional[List[ExtraContainer]] = Field( + default=None, + description="Additional containers to add to the session statefulset.\nNOTE: The container names provided will be partially overwritten and randomized to avoid collisions", + ) + extraVolumes: Optional[List[ExtraVolume]] = Field( + default=None, + description="Additional volumes to include in the statefulset for a session", + ) + hibernated: bool = Field( + ..., + description="Will hibernate the session, scaling the session's statefulset to zero.", + ) + ingress: Optional[Ingress] = Field( + default=None, + description="Configuration for an ingress to the session, if omitted a Kubernetes Ingress will not be created", + ) + initContainers: Optional[List[InitContainer]] = Field( + default=None, + description="Additional init containers to add to the session statefulset\nNOTE: The container names provided will be partially overwritten and randomized to avoid collisions", + ) + session: Session = Field( + ..., + description="Specification for the main session container that the user will access and use", + ) + + +class Condition(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + lastTransitionTime: Optional[datetime] = None + message: Optional[str] = None + reason: Optional[str] = None + status: str + type: str + + +class ContainerCounts(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + ready: Optional[int] = None + total: Optional[int] = None + + +class InitContainerCounts(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + ready: Optional[int] = None + total: Optional[int] = None + + +class State(Enum): + Running = "Running" + Failed = "Failed" + Hibernated = "Hibernated" + NotReady = "NotReady" + RunningDegraded = "RunningDegraded" + + +class Status(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + conditions: Optional[List[Condition]] = Field( + default=None, + description="Conditions store the status conditions of the AmaltheaSessions. This is a standard thing that\nmany operators implement see https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties", + ) + containerCounts: Optional[ContainerCounts] = Field( + default=None, + description="Counts of the total and ready containers, can represent either regular or init containers.", + ) + failingSince: Optional[datetime] = None + hibernatedSince: Optional[datetime] = None + idle: bool = False + idleSince: Optional[datetime] = None + initContainerCounts: Optional[InitContainerCounts] = Field( + default=None, + description="Counts of the total and ready containers, can represent either regular or init containers.", + ) + state: State = "NotReady" + url: Optional[str] = None + + +class Model(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + apiVersion: Optional[str] = Field( + default=None, + description="APIVersion defines the versioned schema of this representation of an object.\nServers should convert recognized schemas to the latest internal value, and\nmay reject unrecognized values.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + ) + kind: Optional[str] = Field( + default=None, + description="Kind is a string value representing the REST resource this object represents.\nServers may infer this from the endpoint the client submits requests to.\nCannot be updated.\nIn CamelCase.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + ) + metadata: Optional[Dict[str, Any]] = None + spec: Optional[Spec] = Field( + default=None, + description="AmaltheaSessionSpec defines the desired state of AmaltheaSession", + ) + status: Status = Field( + default={}, + description="AmaltheaSessionStatus defines the observed state of AmaltheaSession", + ) diff --git a/components/renku_data_services/notebooks/cr_base.py b/components/renku_data_services/notebooks/cr_base.py new file mode 100644 index 000000000..ec75db447 --- /dev/null +++ b/components/renku_data_services/notebooks/cr_base.py @@ -0,0 +1,12 @@ +"""Base models for K8s CRD specifications.""" + +from pydantic import BaseModel + + +class BaseCRD(BaseModel): + """Base CRD specification.""" + + class Config: + """Do not exclude unknown properties.""" + + extra = "allow" diff --git a/components/renku_data_services/notebooks/cr_jupyter_server.py b/components/renku_data_services/notebooks/cr_jupyter_server.py new file mode 100644 index 000000000..f4830e13e --- /dev/null +++ b/components/renku_data_services/notebooks/cr_jupyter_server.py @@ -0,0 +1,213 @@ +# generated by datamodel-codegen: +# filename: +# timestamp: 2024-09-04T22:45:30+00:00 + +from __future__ import annotations + +from enum import Enum +from typing import Any, Dict, List, Optional + +from pydantic import ConfigDict, Field +from renku_data_services.notebooks.cr_base import BaseCRD + + +class Oidc(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + authorizedEmails: List[str] = Field( + default=[], + description='List of users (identified by Email address read from the "email" OIDC claim) which are allowed to access this Jupyter session. This list is stored as a file and passed to the `--authenticated-emails-file` option (see https://oauth2-proxy.github.io/oauth2-proxy/docs/configuration/overview#command-line-options).', + ) + authorizedGroups: List[str] = Field( + default=[], + description='List of groups of users (read from the "groups" OIDC claim) which are allowed to access this Jupyter session. This list passed to the `--allowed-group` option (see https://oauth2-proxy.github.io/oauth2-proxy/docs/configuration/overview#command-line-options).', + ) + clientId: Optional[str] = Field( + default=None, + description="The client id of the application registered with the OIDC provider, see `--client-id` here: https://oauth2-proxy.github.io/oauth2-proxy/docs/configuration/overview/#command-line-options", + ) + enabled: bool = False + issuerUrl: Optional[str] = Field( + default=None, + description="Issuer URL of the OIDC provider, see `--oidc-issuer-url` here: https://oauth2-proxy.github.io/oauth2-proxy/docs/configuration/overview/#command-line-options", + ) + + +class Auth(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + oidc: Oidc = Field( + default={}, + description="Configuration for an OpenID connect provider to be used for access control to the jupyter server. Useful information can be found in the oauth2 proxy docs: https://oauth2-proxy.github.io/oauth2-proxy/docs/configuration/overview/", + ) + token: Optional[str] = Field( + default=None, + description='A token that will be passed to the `--ServerApp.token` option when running the Jupyter server and needed when first accessing the Jupyter server. The options are:\n\n - By leaving this field empty, a token will be autogenerated and\nadded under the key `ServerApp.token` to the secret which is created as a child of the custom resource object.\n\n - Setting the token to an empty string "" runs the Jupyter server\ncontainer itself without any authentication. This is recommended when enabling OIDC as authentication and authorization are then handled by the dedicated plugins.\n\n - Set an actual value here. Note that this string will be stored\nin clear text as part of the custom resource object. This option is mostly useful for dev purposes.', + ) + + +class Culling(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + maxAgeSecondsThreshold: int = Field( + default=0, + description="The maximum allowed age for a session, regardless of whether it is active or not. A value of zero indicates that the server cannot be culled due to its age.", + ge=0, + ) + idleSecondsThreshold: int = Field( + default=0, + description="How long should a server be idle for before it is culled. A value of zero indicates that the server should never be culled for inactivity.", + ge=0, + ) + startingSecondsThreshold: int = Field( + default=0, + description="How long can a server be in starting state before it gets culled. A value of zero indicates that the server cannot be culled due to starting too long.", + ge=0, + ) + failedSecondsThreshold: int = Field( + default=0, + description="How long can a server be in failed state before it gets culled. A value of zero indicates that the server cannot be culled due to failing.", + ge=0, + ) + hibernatedSecondsThreshold: int = Field( + default=0, + description="Number of seconds where a server can be in hibernated state before it gets culled. A value of zero indicates that hibernated servers cannot be culled.", + ge=0, + ) + + +class JupyterServer(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + defaultUrl: str = Field( + default="/lab", + description="The default URL to redirect to from '/'. Frequently used values are '/lab' or '/rstudio'. Translates to `--ServerApp.default_url`.", + ) + hibernated: bool = Field( + default=False, description="Whether the server is hibernated or not." + ) + image: str = "jupyter/minimal-notebook:latest" + resources: Dict[str, Any] = Field( + default={}, + description="Regular K8s resource requests, will be set on the main notebook container.", + ) + rootDir: str = Field( + default="/home/jovyan/work", + description="The absolute path to the root/notebook directory for the jupyter server. Should lead to a subdirectory of or match the path at storage.pvc.mountPath. Translates to `--ServerApp.root_dir`.", + ) + + +class Type(Enum): + application_json_patch_json = "application/json-patch+json" + application_merge_patch_json = "application/merge-patch+json" + + +class Patch(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + patch: Optional[Any] = None + type: Optional[Type] = None + + +class Tls(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + enabled: bool = False + secretName: Optional[str] = Field( + default=None, + description="The name of the K8s TLS secret. Might be pre-existing in the cluster or created under that name by a tool like cert manager when needed.", + ) + + +class Routing(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + host: Optional[str] = Field( + default=None, + description="Host under which the server will be available (eg myserver.example.com), should not include the schema.", + ) + ingressAnnotations: Dict[str, Any] = {} + path: str = Field( + default="/", description="Optionally make the server available under some path." + ) + tls: Tls = Field( + default={}, description="Settings for defining TLS termination by the ingress." + ) + + +class Pvc(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + enabled: bool = Field( + default=False, + description="Whether a PVC should be used to back the session. Defaults to 'false' in which case an emptyDir volume will be used.", + ) + mountPath: str = Field( + default="/home/jovyan/work", + description="The absolute path to the location where the PVC should be mounted in the user session pod.", + ) + storageClassName: Optional[str] = Field( + default=None, + description="Storage class to be used for the PVC. If left empty, the default storage class defined for the cluster will be used.", + ) + + +class Storage(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + pvc: Pvc = {} + size: Any = Field( + default="100Mi", + description="Size of the PVC or sizeLimit of the emptyDir volume which backs the session respectively.", + ) + + +class Spec(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + auth: Auth = Field( + default={}, + description="Settings defining access control to the jupyter server.", + ) + culling: Culling = Field( + default={}, description="Options about culling idle servers" + ) + jupyterServer: JupyterServer = Field( + default={}, + description="Configuration options (such as image to run) for the Jupyter server. See also https://jupyter-server.readthedocs.io/en/latest/other/full-config.html", + ) + patches: List[Patch] = Field( + default=[], + description="Patches to be applied to the created child resources after template rendering. Currently json patches and json merge patches are supported.", + ) + routing: Routing = Field( + default={}, + description="Settings related to how the jupyter server will be exposed outside of the cluster.", + ) + storage: Storage = Field( + default={}, description="Settings to define storage to back the jupyter server." + ) + + +class Model(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + spec: Optional[Spec] = Field( + default=None, + description="User defined specification for a JupyterServer custom resource.", + ) + status: Dict[str, Any] = Field( + default={"children": {}, "mainPod": {}}, + description="A field for Jupyter Server status information, do not modify.", + ) diff --git a/components/renku_data_services/notebooks/crs.py b/components/renku_data_services/notebooks/crs.py new file mode 100644 index 000000000..206a32f96 --- /dev/null +++ b/components/renku_data_services/notebooks/crs.py @@ -0,0 +1,215 @@ +"""Custom resource definition with proper names from the autogenerated code.""" + +from datetime import datetime +from typing import Any, cast +from urllib.parse import urljoin + +from kubernetes.utils import parse_quantity +from pydantic import BaseModel, Field, field_validator +from sanic.log import logger +from ulid import ULID + +from renku_data_services.errors import errors +from renku_data_services.notebooks import apispec +from renku_data_services.notebooks.cr_amalthea_session import ( + Authentication, + CodeRepository, + Culling, + DataSource, + ExtraContainer, + ExtraVolume, + ExtraVolumeMount, + Ingress, + InitContainer, + SecretRef, + Session, + State, + Storage, + TlsSecret, +) +from renku_data_services.notebooks.cr_amalthea_session import EnvItem2 as SessionEnvItem +from renku_data_services.notebooks.cr_amalthea_session import Item4 as SecretAsVolumeItem +from renku_data_services.notebooks.cr_amalthea_session import Model as _ASModel +from renku_data_services.notebooks.cr_amalthea_session import Resources3 as Resources +from renku_data_services.notebooks.cr_amalthea_session import Secret1 as SecretAsVolume +from renku_data_services.notebooks.cr_amalthea_session import Spec as AmaltheaSessionSpec +from renku_data_services.notebooks.cr_amalthea_session import Type as AuthenticationType +from renku_data_services.notebooks.cr_amalthea_session import Type1 as CodeRepositoryType +from renku_data_services.notebooks.cr_jupyter_server import Model as _JSModel +from renku_data_services.notebooks.cr_jupyter_server import Patch +from renku_data_services.notebooks.cr_jupyter_server import Spec as JupyterServerSpec +from renku_data_services.notebooks.cr_jupyter_server import Type as PatchType + + +class Metadata(BaseModel): + """Basic k8s metadata spec.""" + + class Config: + """Do not exclude unknown properties.""" + + extra = "allow" + + name: str + namespace: str | None = None + labels: dict[str, str] = Field(default_factory=dict) + annotations: dict[str, str] = Field(default_factory=dict) + uid: str | None = None + creationTimestamp: datetime | None = None + deletionTimestamp: datetime | None = None + + +class ComputeResources(BaseModel): + """Resource requests from k8s values.""" + + cpu: float | None = None + memory: int | None = None + storage: int | None = None + gpu: int | None = None + + @field_validator("cpu", mode="before") + @classmethod + def _convert_k8s_cpu(cls, val: Any) -> Any: + if val is None: + return None + return float(parse_quantity(val)) + + @field_validator("gpu", mode="before") + @classmethod + def _convert_k8s_gpu(cls, val: Any) -> Any: + if val is None: + return None + return round(parse_quantity(val), ndigits=None) + + @field_validator("memory", "storage", mode="before") + @classmethod + def _convert_k8s_bytes(cls, val: Any) -> Any: + """Converts to gigabytes of base 10.""" + if val is None: + return None + return round(parse_quantity(val) / 1_000_000_000, ndigits=None) + + +class JupyterServerV1Alpha1(_JSModel): + """Jupyter server CRD.""" + + kind: str = "JupyterServer" + apiVersion: str = "amalthea.dev/v1alpha1" + metadata: Metadata + + def get_compute_resources(self) -> ComputeResources: + """Convert the k8s resource requests and storage into usable values.""" + if self.spec is None: + return ComputeResources() + resource_requests: dict = self.spec.jupyterServer.resources.get("requests", {}) + resource_requests["storage"] = self.spec.storage.size + return ComputeResources.model_validate(resource_requests) + + +class AmaltheaSessionV1Alpha1(_ASModel): + """Amalthea session CRD.""" + + kind: str = "AmaltheaSession" + apiVersion: str = "amalthea.dev/v1alpha1" + # Here we overwrite the default from ASModel because it is too weakly typed + metadata: Metadata # type: ignore[assignment] + + def get_compute_resources(self) -> ComputeResources: + """Convert the k8s resource requests and storage into usable values.""" + if self.spec is None: + return ComputeResources() + resource_requests: dict = {} + if self.spec.session.resources is not None: + resource_requests = self.spec.session.resources.requests or {} + resource_requests["storage"] = self.spec.session.storage.size + return ComputeResources.model_validate(resource_requests) + + @property + def project_id(self) -> ULID: + """Get the project ID from the annotations.""" + if "renku.io/project_id" not in self.metadata.annotations: + raise errors.ProgrammingError( + message=f"The session with name {self.metadata.name} is missing its project_id annotation" + ) + return cast(ULID, ULID.from_str(self.metadata.annotations["renku.io/project_id"])) + + @property + def launcher_id(self) -> ULID: + """Get the launcher ID from the annotations.""" + if "renku.io/launcher_id" not in self.metadata.annotations: + raise errors.ProgrammingError( + message=f"The session with name {self.metadata.name} is missing its launcher_id annotation" + ) + return cast(ULID, ULID.from_str(self.metadata.annotations["renku.io/launcher_id"])) + + @property + def resource_class_id(self) -> int: + """Get the resource class from the annotations.""" + if "renku.io/resource_class_id" not in self.metadata.annotations: + raise errors.ProgrammingError( + message=f"The session with name {self.metadata.name} is missing its resource_class_id annotation" + ) + return int(self.metadata.annotations["renku.io/resource_class_id"]) + + def as_apispec(self) -> apispec.SessionResponse: + """Convert the manifest into a form ready to be serialized and sent in a HTTP response.""" + if self.status is None: + raise errors.ProgrammingError( + message=f"The manifest for a session with name {self.metadata.name} cannot be serialized " + f"because it is missing a status" + ) + if self.spec is None: + raise errors.ProgrammingError( + message=f"The manifest for a session with name {self.metadata.name} cannot be serialized " + "because it is missing the spec field" + ) + if self.spec.session.resources is None: + raise errors.ProgrammingError( + message=f"The manifest for a session with name {self.metadata.name} cannot be serialized " + "because it is missing the spec.session.resources field" + ) + url = "None" + if self.status.url is None or self.status.url == "" or self.status.url.lower() == "None": + if self.spec is not None and self.spec.ingress is not None: + scheme = "https" if self.spec.ingress.tlsSecret is not None else "http" + url = urljoin(f"{scheme}://{self.spec.ingress.host}", self.spec.session.urlPath) + else: + url = self.status.url + ready_containers = 0 + total_containers = 0 + if self.status.initContainerCounts is not None: + ready_containers += self.status.initContainerCounts.ready or 0 + total_containers += self.status.initContainerCounts.total or 0 + if self.status.containerCounts is not None: + ready_containers += self.status.containerCounts.ready or 0 + total_containers += self.status.containerCounts.total or 0 + + if self.status.state in [State.Running, State.Hibernated, State.Failed]: + state = apispec.State3(self.status.state.value.lower()) + elif self.status.state == State.RunningDegraded: + state = apispec.State3.running + elif self.status.state == State.NotReady and self.metadata.deletionTimestamp is not None: + state = apispec.State3.stopping + else: + state = apispec.State3.starting + + return apispec.SessionResponse( + image=self.spec.session.image, + name=self.metadata.name, + resources=apispec.SessionResources( + requests=apispec.SessionResourcesRequests.model_validate( + self.get_compute_resources(), from_attributes=True + ) + if self.spec.session.resources.requests is not None + else None, + ), + started=self.metadata.creationTimestamp, + status=apispec.SessionStatus( + state=state, + ready_containers=ready_containers, + total_containers=total_containers, + ), + url=url, + project_id=str(self.project_id), + launcher_id=str(self.launcher_id), + resource_class_id=self.resource_class_id, + ) diff --git a/components/renku_data_services/notebooks/models.py b/components/renku_data_services/notebooks/models.py new file mode 100644 index 000000000..fec11eb4b --- /dev/null +++ b/components/renku_data_services/notebooks/models.py @@ -0,0 +1,76 @@ +"""Basic models for amalthea sessions.""" + +from dataclasses import dataclass +from pathlib import Path + +from pydantic import AliasGenerator, BaseModel, Field, Json + +from renku_data_services.notebooks.crs import AmaltheaSessionV1Alpha1 + + +@dataclass +class SessionEnvVar: + """Environment variables for an amalthea session.""" + + name: str + value: str + + +@dataclass +class SessionUserSecrets: + """User secret mounted in an amalthea session.""" + + mount_path: Path + user_secret_ids: list[str] + + +class _AmaltheaSessionAnnotations(BaseModel): + class Config: + extra = "allow" + alias_generator = AliasGenerator( + alias=lambda field_name: f"renku.io/{field_name}", + ) + + session_launcher_id: str | None = None + project_id: str | None = None + user_secrets_mount_path: str | None = None + user_secrets_ids: Json[list[str]] = Field(default_factory=list) + env_variable_names: Json[list[str]] = Field(default_factory=list) + + +class _MetadataValidation(BaseModel): + class Config: + extra = "allow" + + name: str + annotations: _AmaltheaSessionAnnotations + labels: dict[str, str] = Field(default_factory=dict) + namespace: str | None = None + + +class AmaltheaSessionManifest: + """The manifest for an amalthea session.""" + + def __init__(self, manifest: AmaltheaSessionV1Alpha1) -> None: + self._manifest = manifest + self._metadata = _MetadataValidation.model_validate(self._manifest.metadata) + + def __repr__(self) -> str: + return f"{self.__class__}(name={self._metadata.name})" + + @property + def env_vars(self) -> dict[str, SessionEnvVar]: + """Extract the environment variables from a manifest.""" + output: dict[str, SessionEnvVar] = {} + assert self._manifest.spec + for env in self._manifest.spec.session.env or []: + if env.value is None: + continue + output[env.name] = SessionEnvVar(env.name, env.value) + return output + + @property + def requested_env_vars(self) -> dict[str, SessionEnvVar]: + """The environment variables requested.""" + requested_names = self._metadata.annotations.env_variable_names + return {ikey: ival for ikey, ival in self.env_vars.items() if ikey in requested_names} diff --git a/components/renku_data_services/notebooks/util/authn.py b/components/renku_data_services/notebooks/util/authn.py index 6cc578cc4..d01b169e0 100644 --- a/components/renku_data_services/notebooks/util/authn.py +++ b/components/renku_data_services/notebooks/util/authn.py @@ -1,52 +1,40 @@ """Authentication that is compatible with the tokens sent to the notebook service.""" -from collections.abc import Awaitable, Callable, Coroutine -from dataclasses import dataclass +from collections.abc import Callable, Coroutine from functools import wraps from typing import Any, Concatenate, ParamSpec, TypeVar from sanic import Request -from renku_data_services.errors import errors -from renku_data_services.notebooks.api.classes.user import AnonymousUser, RegisteredUser -from renku_data_services.notebooks.config import _NotebooksConfig +from renku_data_services.base_models import AnonymousAPIUser, APIUser, AuthenticatedAPIUser, Authenticator _T = TypeVar("_T") _P = ParamSpec("_P") -@dataclass -class NotebooksAuthenticator: - """Authentication for notebooks endpoints.""" - - config: _NotebooksConfig - - def authenticate(self, request: Request) -> RegisteredUser | AnonymousUser: - """Validate the tokens and ensure the user is signed in.""" - headers_dict: dict[str, str] = {str(k): str(v) for (k, v) in request.headers.items()} - user: RegisteredUser | AnonymousUser = RegisteredUser(headers_dict) - if not self.config.anonymous_sessions_enabled and not user.authenticated: - raise errors.UnauthorizedError(message="You have to be authenticated to perform this operation.") - if not user.authenticated: - user = AnonymousUser(headers_dict, self.config.git.url) - return user - - -def notebooks_authenticate( - authenticator: NotebooksAuthenticator, +def notebooks_internal_gitlab_authenticate( + authenticator: Authenticator, ) -> Callable[ - [Callable[Concatenate[Request, RegisteredUser | AnonymousUser, _P], Awaitable[_T]]], - Callable[Concatenate[Request, _P], Coroutine[Any, Any, _T]], + [Callable[Concatenate[Request, AuthenticatedAPIUser | AnonymousAPIUser, APIUser, _P], Coroutine[Any, Any, _T]]], + Callable[Concatenate[Request, AuthenticatedAPIUser | AnonymousAPIUser, _P], Coroutine[Any, Any, _T]], ]: """Decorator for a Sanic handler that that adds a notebooks user.""" def decorator( - f: Callable[Concatenate[Request, RegisteredUser | AnonymousUser, _P], Awaitable[_T]], - ) -> Callable[Concatenate[Request, _P], Coroutine[Any, Any, _T]]: + f: Callable[ + Concatenate[Request, AuthenticatedAPIUser | AnonymousAPIUser, APIUser, _P], Coroutine[Any, Any, _T] + ], + ) -> Callable[Concatenate[Request, AuthenticatedAPIUser | AnonymousAPIUser, _P], Coroutine[Any, Any, _T]]: @wraps(f) - async def decorated_function(request: Request, *args: _P.args, **kwargs: _P.kwargs) -> _T: - user = authenticator.authenticate(request) - return await f(request, user, *args, **kwargs) + async def decorated_function( + request: Request, + user: AuthenticatedAPIUser | AnonymousAPIUser, + *args: _P.args, + **kwargs: _P.kwargs, + ) -> _T: + access_token = str(request.headers.get("Gitlab-Access-Token")) + internal_gitlab_user = await authenticator.authenticate(access_token, request) + return await f(request, user, internal_gitlab_user, *args, **kwargs) return decorated_function diff --git a/components/renku_data_services/notebooks/util/kubernetes_.py b/components/renku_data_services/notebooks/util/kubernetes_.py index 135afcc3a..7cf289c95 100644 --- a/components/renku_data_services/notebooks/util/kubernetes_.py +++ b/components/renku_data_services/notebooks/util/kubernetes_.py @@ -18,14 +18,15 @@ from __future__ import annotations -from dataclasses import dataclass from enum import StrEnum from hashlib import md5 -from typing import Any, Self, TypeAlias +from typing import Any, TypeAlias, cast import escapism from kubernetes.client import V1Container +from renku_data_services.notebooks.crs import Patch, PatchType + def renku_1_make_server_name(safe_username: str, namespace: str, project: str, branch: str, commit_sha: str) -> str: """Form a unique server name for Renku 1.0 sessions. @@ -58,11 +59,10 @@ def renku_2_make_server_name(safe_username: str, project_id: str, launcher_id: s server_hash = md5(server_string_for_hashing.encode(), usedforsecurity=False).hexdigest().lower() prefix = _make_server_name_prefix(safe_username) # NOTE: A K8s object name can only contain lowercase alphanumeric characters, hyphens, or dots. - # Must be less than 253 characters long and start and end with an alphanumeric. + # Must be no more than 63 characters because the name is used to create a k8s Service and Services + # have more restrictions for their names beacuse their names have to make a valid hostname. # NOTE: We use server name as a label value, so, server name must be less than 63 characters. - # NOTE: Amalthea adds 11 characters to the server name in a label, so we have only - # 52 characters available. - # !NOTE: For now we limit the server name to 42 characters. + # !NOTE: For now we limit the server name to a max of 42 characters. # NOTE: This is 12 + 9 + 21 = 42 characters return f"{prefix[:12]}-renku-2-{server_hash[:21]}" @@ -110,25 +110,17 @@ class PatchKind(StrEnum): merge: str = "application/merge-patch+json" -@dataclass -class Patch: - """Representation of a JSON patch.""" - - patch: JsonPatch | MergePatch - type: PatchKind - - @classmethod - def from_dict(cls, data: dict[str, Any]) -> Self: - """Create a patch from a dictionary.""" - return cls(data["patch"], PatchKind(data["type"])) - - def find_container(patches: list[Patch], container_name: str) -> dict[str, Any] | None: """Find the json patch corresponding a given container.""" + # rfc 7386 patches are dictionaries, i.e. merge patch or json merge patch + # rfc 6902 patches are lists, i.e. json patch for patch_obj in patches: - if patch_obj.type != PatchKind.json or not isinstance(patch_obj.patch, list): + if patch_obj.type != PatchType.application_json_patch_json or not isinstance(patch_obj.patch, list): continue for p in patch_obj.patch: + if not isinstance(p, dict): + continue + p = cast(dict[str, Any], p) if ( p.get("op") == "add" and p.get("path") == "/statefulset/spec/template/spec/containers/-" diff --git a/components/renku_data_services/notebooks/util/retries.py b/components/renku_data_services/notebooks/util/retries.py index e99f374a2..5e6854db8 100644 --- a/components/renku_data_services/notebooks/util/retries.py +++ b/components/renku_data_services/notebooks/util/retries.py @@ -1,9 +1,10 @@ """Methods for retrying requests.""" +import asyncio import functools -from collections.abc import Callable +from collections.abc import Awaitable, Callable from time import sleep -from typing import Any, Concatenate, ParamSpec, TypeVar +from typing import Concatenate, ParamSpec, TypeVar from renku_data_services.notebooks.errors.intermittent import RetryTimeoutError @@ -12,7 +13,7 @@ def retry_with_exponential_backoff( - should_retry: Callable[[Any], bool], + should_retry: Callable[[_RetType], bool], num_retries: int = 10, initial_wait_ms: int = 20, multiplier: float = 2.0, @@ -39,3 +40,38 @@ def wrapper_retry(*args: _Params.args, **kwargs: _Params.kwargs) -> _RetType: return wrapper_retry return decorator_retry + + +def retry_with_exponential_backoff_async( + should_retry: Callable[[_RetType], bool], + num_retries: int = 10, + initial_wait_ms: int = 20, + multiplier: float = 2.0, +) -> Callable[ + [Callable[Concatenate[_Params], Awaitable[_RetType]]], Callable[Concatenate[_Params], Awaitable[_RetType]] +]: + """Retries the wrapped function with an exponential backoff. + + The should_retry "callback" is passed the results from calling the wrapped function. + If the response is true, the function is called again, otherwise the loop ends and + the result of the wrapped function is returned. + + With the default values the wait times start with 20ms and then double every iteration. + """ + + def decorator_retry( + func: Callable[Concatenate[_Params], Awaitable[_RetType]], + ) -> Callable[Concatenate[_Params], Awaitable[_RetType]]: + @functools.wraps(func) + async def wrapper_retry(*args: _Params.args, **kwargs: _Params.kwargs) -> _RetType: + for i in range(num_retries): + res = await func(*args, **kwargs) + if not should_retry(res): + return res + + await asyncio.sleep(initial_wait_ms * (multiplier**i) / 1000) + raise RetryTimeoutError(f"Retrying the function {func.__name__} timed out after {num_retries} retries.") + + return wrapper_retry + + return decorator_retry diff --git a/components/renku_data_services/repositories/blueprints.py b/components/renku_data_services/repositories/blueprints.py index 551164ab0..48029b1b1 100644 --- a/components/renku_data_services/repositories/blueprints.py +++ b/components/renku_data_services/repositories/blueprints.py @@ -8,7 +8,7 @@ import renku_data_services.base_models as base_models from renku_data_services import errors -from renku_data_services.base_api.auth import authenticate +from renku_data_services.base_api.auth import authenticate_2 from renku_data_services.base_api.blueprint import BlueprintFactoryResponse, CustomBlueprint from renku_data_services.base_api.etag import extract_if_none_match from renku_data_services.repositories import apispec @@ -28,23 +28,23 @@ class RepositoriesBP(CustomBlueprint): def get_one_repository(self) -> BlueprintFactoryResponse: """Get the metadata available about a repository.""" - @authenticate(self.internal_gitlab_authenticator) - async def _get_internal_gitlab_user(_: Request, user: base_models.APIUser) -> base_models.APIUser: - return user - - @authenticate(self.authenticator) + @authenticate_2(self.authenticator, self.internal_gitlab_authenticator) @extract_if_none_match async def _get_one_repository( - request: Request, user: base_models.APIUser, repository_url: str, etag: str | None + request: Request, + user: base_models.APIUser, + internal_gitlab_user: base_models.APIUser, + repository_url: str, + etag: str | None, ) -> JSONResponse | HTTPResponse: repository_url = unquote(repository_url) RepositoryParams.model_validate(dict(repository_url=repository_url)) - async def get_internal_gitlab_user() -> base_models.APIUser: - return await _get_internal_gitlab_user(request) - result = await self.git_repositories_repo.get_repository( - repository_url=repository_url, user=user, etag=etag, get_internal_gitlab_user=get_internal_gitlab_user + repository_url=repository_url, + user=user, + etag=etag, + internal_gitlab_user=internal_gitlab_user, ) if result == "304": return HTTPResponse(status=304) diff --git a/components/renku_data_services/repositories/db.py b/components/renku_data_services/repositories/db.py index c44a1a5ce..afebc4b5e 100644 --- a/components/renku_data_services/repositories/db.py +++ b/components/renku_data_services/repositories/db.py @@ -1,7 +1,7 @@ """Adapters for repositories database classes.""" -from collections.abc import Callable, Coroutine -from typing import Any, Literal +from collections.abc import Callable +from typing import Literal from urllib.parse import urlparse from httpx import AsyncClient as HttpClient @@ -38,7 +38,7 @@ async def get_repository( repository_url: str, user: base_models.APIUser, etag: str | None, - get_internal_gitlab_user: Callable[..., Coroutine[Any, Any, base_models.APIUser]], + internal_gitlab_user: base_models.APIUser, ) -> models.RepositoryProviderMatch | Literal["304"]: """Get the metadata about a repository.""" repository_netloc = urlparse(repository_url).netloc @@ -52,10 +52,9 @@ async def get_repository( if self.internal_gitlab_url: internal_gitlab_netloc = urlparse(self.internal_gitlab_url).netloc if matched_client is None and internal_gitlab_netloc == repository_netloc: - gitlab_user = await get_internal_gitlab_user() return await self._get_repository_from_internal_gitlab( repository_url=repository_url, - user=gitlab_user, + user=internal_gitlab_user, etag=etag, internal_gitlab_url=self.internal_gitlab_url, ) diff --git a/components/renku_data_services/session/orm.py b/components/renku_data_services/session/orm.py index 12217cc39..2a7cc855d 100644 --- a/components/renku_data_services/session/orm.py +++ b/components/renku_data_services/session/orm.py @@ -131,8 +131,8 @@ def load(cls, launcher: models.SessionLauncher) -> "SessionLauncherORM": creation_date=launcher.creation_date, description=launcher.description, project_id=ULID.from_str(launcher.project_id), - resource_class_id=launcher.resource_class_id, environment_id=launcher.environment.id, + resource_class_id=launcher.resource_class_id, ) def dump(self) -> models.SessionLauncher: diff --git a/components/renku_data_services/storage/blueprints.py b/components/renku_data_services/storage/blueprints.py index 5fb3c4b38..5a0f00634 100644 --- a/components/renku_data_services/storage/blueprints.py +++ b/components/renku_data_services/storage/blueprints.py @@ -55,7 +55,6 @@ async def _get( validator: RCloneValidator, query: apispec.StorageParams, ) -> JSONResponse: - storage: list[models.CloudStorage] storage = await self.storage_repo.get_storage(user=user, project_id=query.project_id) return json([dump_storage_with_sensitive_fields(s, validator) for s in storage]) @@ -202,7 +201,6 @@ async def _get( validator: RCloneValidator, query: apispec.StorageV2Params, ) -> JSONResponse: - storage: list[models.CloudStorage] storage = await self.storage_v2_repo.get_storage( user=user, include_secrets=True, project_id=query.project_id ) diff --git a/components/renku_data_services/storage/db.py b/components/renku_data_services/storage/db.py index c2156da0b..583331d5f 100644 --- a/components/renku_data_services/storage/db.py +++ b/components/renku_data_services/storage/db.py @@ -56,7 +56,7 @@ async def get_storage( name: str | None = None, include_secrets: bool = False, filter_by_access_level: bool = True, - ) -> list[models.CloudStorage]: + ) -> list[models.SavedCloudStorage]: """Get a storage from the database.""" async with self.session_maker() as session: if not project_id and not name and not id: @@ -91,7 +91,7 @@ async def get_storage( return [s.dump() for s in storage_orms if s.project_id in accessible_projects] - async def get_storage_by_id(self, storage_id: ULID, user: base_models.APIUser) -> models.CloudStorage: + async def get_storage_by_id(self, storage_id: ULID, user: base_models.APIUser) -> models.SavedCloudStorage: """Get a single storage by id.""" storages = await self.get_storage(user, id=str(storage_id), include_secrets=True, filter_by_access_level=False) @@ -102,9 +102,7 @@ async def get_storage_by_id(self, storage_id: ULID, user: base_models.APIUser) - return storages[0] - async def insert_storage( - self, storage: models.UnsavedCloudStorage, user: base_models.APIUser - ) -> models.CloudStorage: + async def insert_storage(self, storage: models.CloudStorage, user: base_models.APIUser) -> models.SavedCloudStorage: """Insert a new cloud storage entry.""" if not await self.filter_projects_by_access_level(user, [storage.project_id], authz_models.Role.OWNER): raise errors.ForbiddenError(message="User does not have access to this project") @@ -118,7 +116,9 @@ async def insert_storage( session.add(orm) return orm.dump() - async def update_storage(self, storage_id: ULID, user: base_models.APIUser, **kwargs: dict) -> models.CloudStorage: + async def update_storage( + self, storage_id: ULID, user: base_models.APIUser, **kwargs: dict + ) -> models.SavedCloudStorage: """Update a cloud storage entry.""" async with self.session_maker() as session, session.begin(): res = await session.execute( diff --git a/components/renku_data_services/storage/models.py b/components/renku_data_services/storage/models.py index e019f77c5..023f58f45 100644 --- a/components/renku_data_services/storage/models.py +++ b/components/renku_data_services/storage/models.py @@ -253,3 +253,9 @@ class CloudStorageSecretUpsert(BaseModel): name: str = Field() value: str = Field() + + +class SavedCloudStorage(CloudStorage): + """A cloud storage that has been saved in the DB.""" + + storage_id: ULID diff --git a/components/renku_data_services/storage/orm.py b/components/renku_data_services/storage/orm.py index e127cc439..a2b6bd37f 100644 --- a/components/renku_data_services/storage/orm.py +++ b/components/renku_data_services/storage/orm.py @@ -81,9 +81,9 @@ def load(cls, storage: models.UnsavedCloudStorage) -> "CloudStorageORM": readonly=storage.readonly, ) - def dump(self) -> models.CloudStorage: + def dump(self) -> models.SavedCloudStorage: """Create a cloud storage model from the ORM object.""" - return models.CloudStorage( + return models.SavedCloudStorage( project_id=self.project_id, name=self.name, storage_type=self.storage_type, diff --git a/components/renku_data_services/utils/sqlalchemy.py b/components/renku_data_services/utils/sqlalchemy.py index 82bcfb9db..de520c5d7 100644 --- a/components/renku_data_services/utils/sqlalchemy.py +++ b/components/renku_data_services/utils/sqlalchemy.py @@ -32,11 +32,14 @@ class PurePosixPathType(types.TypeDecorator): impl = types.String cache_ok = True - def process_bind_param(self, value: PurePosixPath | None, dialect: Dialect) -> str | None: + def process_bind_param(self, value: PurePosixPath | str | None, dialect: Dialect) -> str | None: """Transform value for storing in the database.""" if value is None: return None - return value.as_posix() + elif isinstance(value, str): + return value + else: + return value.as_posix() def process_result_value(self, value: str | None, dialect: Dialect) -> PurePosixPath | None: """Transform string from database into PosixPath.""" diff --git a/poetry.lock b/poetry.lock index 0a75f5f82..a4fd56ed5 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.8.2 and should not be changed by hand. [[package]] name = "aiofiles" @@ -215,6 +215,17 @@ doc = ["Sphinx (>=7)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphin test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (>=0.17)"] trio = ["trio (>=0.23)"] +[[package]] +name = "appier" +version = "1.34.4" +description = "Appier Framework" +optional = false +python-versions = "*" +files = [ + {file = "appier-1.34.4-py2.py3-none-any.whl", hash = "sha256:96769bf56e015175f798958d6eeb189b6fbc2ca799f3b7d2c9bd463eb45a12ae"}, + {file = "appier-1.34.4.tar.gz", hash = "sha256:dd3b244ee2797c7ceda0b81f2331e39724262d4e838f1e53866543136d88ee96"}, +] + [[package]] name = "argcomplete" version = "3.5.0" @@ -229,6 +240,20 @@ files = [ [package.extras] test = ["coverage", "mypy", "pexpect", "ruff", "wheel"] +[[package]] +name = "asyncache" +version = "0.3.1" +description = "Helpers to use cachetools with async code." +optional = false +python-versions = ">=3.8,<4.0" +files = [ + {file = "asyncache-0.3.1-py3-none-any.whl", hash = "sha256:ef20a1024d265090dd1e0785c961cf98b9c32cc7d9478973dcf25ac1b80011f5"}, + {file = "asyncache-0.3.1.tar.gz", hash = "sha256:9a1e60a75668e794657489bdea6540ee7e3259c483517b934670db7600bf5035"}, +] + +[package.dependencies] +cachetools = ">=5.2.0,<6.0.0" + [[package]] name = "asyncpg" version = "0.29.0" @@ -318,20 +343,21 @@ cryptography = "*" [[package]] name = "authzed" -version = "0.18.3" +version = "0.16.0" description = "Client library for SpiceDB." optional = false python-versions = "<4.0,>=3.8" files = [ - {file = "authzed-0.18.3-py3-none-any.whl", hash = "sha256:367baa6f00dac68daad814f7db82464576530a11806ac4b17978d8c0e89511b7"}, - {file = "authzed-0.18.3.tar.gz", hash = "sha256:a06a930c78fdbe61f5caf4415739a9941d23068527fcf91011edc1ae0188a4f5"}, + {file = "authzed-0.16.0-py3-none-any.whl", hash = "sha256:84d3f3b1e4f8db19ddf86e9a5dea347f2fc5ce6f1dcd4c897758db57be5bde93"}, + {file = "authzed-0.16.0.tar.gz", hash = "sha256:29d6338c9c566227f704718639558959afc027be8d5b46bd01ef1dab5a26d0db"}, ] [package.dependencies] -googleapis-common-protos = ">=1.65.0,<2.0.0" -grpc-interceptor = ">=0.15.4,<0.16.0" +google_api = ">=0.1.12,<0.2.0" +google-api-core = ">=2.4.0,<3.0.0" grpcio = ">=1.63,<2.0" protobuf = ">=5.26,<6" +typing-extensions = ">=3.7.4,<5" [[package]] name = "avro-preprocessor" @@ -763,38 +789,43 @@ toml = ["tomli"] [[package]] name = "cryptography" -version = "43.0.1" +version = "42.0.8" description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers." optional = false python-versions = ">=3.7" files = [ - {file = "cryptography-43.0.1-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:8385d98f6a3bf8bb2d65a73e17ed87a3ba84f6991c155691c51112075f9ffc5d"}, - {file = "cryptography-43.0.1-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:27e613d7077ac613e399270253259d9d53872aaf657471473ebfc9a52935c062"}, - {file = "cryptography-43.0.1-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:68aaecc4178e90719e95298515979814bda0cbada1256a4485414860bd7ab962"}, - {file = "cryptography-43.0.1-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:de41fd81a41e53267cb020bb3a7212861da53a7d39f863585d13ea11049cf277"}, - {file = "cryptography-43.0.1-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:f98bf604c82c416bc829e490c700ca1553eafdf2912a91e23a79d97d9801372a"}, - {file = "cryptography-43.0.1-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:61ec41068b7b74268fa86e3e9e12b9f0c21fcf65434571dbb13d954bceb08042"}, - {file = "cryptography-43.0.1-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:014f58110f53237ace6a408b5beb6c427b64e084eb451ef25a28308270086494"}, - {file = "cryptography-43.0.1-cp37-abi3-win32.whl", hash = "sha256:2bd51274dcd59f09dd952afb696bf9c61a7a49dfc764c04dd33ef7a6b502a1e2"}, - {file = "cryptography-43.0.1-cp37-abi3-win_amd64.whl", hash = "sha256:666ae11966643886c2987b3b721899d250855718d6d9ce41b521252a17985f4d"}, - {file = "cryptography-43.0.1-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:ac119bb76b9faa00f48128b7f5679e1d8d437365c5d26f1c2c3f0da4ce1b553d"}, - {file = "cryptography-43.0.1-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1bbcce1a551e262dfbafb6e6252f1ae36a248e615ca44ba302df077a846a8806"}, - {file = "cryptography-43.0.1-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:58d4e9129985185a06d849aa6df265bdd5a74ca6e1b736a77959b498e0505b85"}, - {file = "cryptography-43.0.1-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:d03a475165f3134f773d1388aeb19c2d25ba88b6a9733c5c590b9ff7bbfa2e0c"}, - {file = "cryptography-43.0.1-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:511f4273808ab590912a93ddb4e3914dfd8a388fed883361b02dea3791f292e1"}, - {file = "cryptography-43.0.1-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:80eda8b3e173f0f247f711eef62be51b599b5d425c429b5d4ca6a05e9e856baa"}, - {file = "cryptography-43.0.1-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:38926c50cff6f533f8a2dae3d7f19541432610d114a70808f0926d5aaa7121e4"}, - {file = "cryptography-43.0.1-cp39-abi3-win32.whl", hash = "sha256:a575913fb06e05e6b4b814d7f7468c2c660e8bb16d8d5a1faf9b33ccc569dd47"}, - {file = "cryptography-43.0.1-cp39-abi3-win_amd64.whl", hash = "sha256:d75601ad10b059ec832e78823b348bfa1a59f6b8d545db3a24fd44362a1564cb"}, - {file = "cryptography-43.0.1-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:ea25acb556320250756e53f9e20a4177515f012c9eaea17eb7587a8c4d8ae034"}, - {file = "cryptography-43.0.1-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:c1332724be35d23a854994ff0b66530119500b6053d0bd3363265f7e5e77288d"}, - {file = "cryptography-43.0.1-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:fba1007b3ef89946dbbb515aeeb41e30203b004f0b4b00e5e16078b518563289"}, - {file = "cryptography-43.0.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:5b43d1ea6b378b54a1dc99dd8a2b5be47658fe9a7ce0a58ff0b55f4b43ef2b84"}, - {file = "cryptography-43.0.1-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:88cce104c36870d70c49c7c8fd22885875d950d9ee6ab54df2745f83ba0dc365"}, - {file = "cryptography-43.0.1-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:9d3cdb25fa98afdd3d0892d132b8d7139e2c087da1712041f6b762e4f807cc96"}, - {file = "cryptography-43.0.1-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:e710bf40870f4db63c3d7d929aa9e09e4e7ee219e703f949ec4073b4294f6172"}, - {file = "cryptography-43.0.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:7c05650fe8023c5ed0d46793d4b7d7e6cd9c04e68eabe5b0aeea836e37bdcec2"}, - {file = "cryptography-43.0.1.tar.gz", hash = "sha256:203e92a75716d8cfb491dc47c79e17d0d9207ccffcbcb35f598fbe463ae3444d"}, + {file = "cryptography-42.0.8-cp37-abi3-macosx_10_12_universal2.whl", hash = "sha256:81d8a521705787afe7a18d5bfb47ea9d9cc068206270aad0b96a725022e18d2e"}, + {file = "cryptography-42.0.8-cp37-abi3-macosx_10_12_x86_64.whl", hash = "sha256:961e61cefdcb06e0c6d7e3a1b22ebe8b996eb2bf50614e89384be54c48c6b63d"}, + {file = "cryptography-42.0.8-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e3ec3672626e1b9e55afd0df6d774ff0e953452886e06e0f1eb7eb0c832e8902"}, + {file = "cryptography-42.0.8-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e599b53fd95357d92304510fb7bda8523ed1f79ca98dce2f43c115950aa78801"}, + {file = "cryptography-42.0.8-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:5226d5d21ab681f432a9c1cf8b658c0cb02533eece706b155e5fbd8a0cdd3949"}, + {file = "cryptography-42.0.8-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:6b7c4f03ce01afd3b76cf69a5455caa9cfa3de8c8f493e0d3ab7d20611c8dae9"}, + {file = "cryptography-42.0.8-cp37-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:2346b911eb349ab547076f47f2e035fc8ff2c02380a7cbbf8d87114fa0f1c583"}, + {file = "cryptography-42.0.8-cp37-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:ad803773e9df0b92e0a817d22fd8a3675493f690b96130a5e24f1b8fabbea9c7"}, + {file = "cryptography-42.0.8-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:2f66d9cd9147ee495a8374a45ca445819f8929a3efcd2e3df6428e46c3cbb10b"}, + {file = "cryptography-42.0.8-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:d45b940883a03e19e944456a558b67a41160e367a719833c53de6911cabba2b7"}, + {file = "cryptography-42.0.8-cp37-abi3-win32.whl", hash = "sha256:a0c5b2b0585b6af82d7e385f55a8bc568abff8923af147ee3c07bd8b42cda8b2"}, + {file = "cryptography-42.0.8-cp37-abi3-win_amd64.whl", hash = "sha256:57080dee41209e556a9a4ce60d229244f7a66ef52750f813bfbe18959770cfba"}, + {file = "cryptography-42.0.8-cp39-abi3-macosx_10_12_universal2.whl", hash = "sha256:dea567d1b0e8bc5764b9443858b673b734100c2871dc93163f58c46a97a83d28"}, + {file = "cryptography-42.0.8-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c4783183f7cb757b73b2ae9aed6599b96338eb957233c58ca8f49a49cc32fd5e"}, + {file = "cryptography-42.0.8-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a0608251135d0e03111152e41f0cc2392d1e74e35703960d4190b2e0f4ca9c70"}, + {file = "cryptography-42.0.8-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:dc0fdf6787f37b1c6b08e6dfc892d9d068b5bdb671198c72072828b80bd5fe4c"}, + {file = "cryptography-42.0.8-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:9c0c1716c8447ee7dbf08d6db2e5c41c688544c61074b54fc4564196f55c25a7"}, + {file = "cryptography-42.0.8-cp39-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:fff12c88a672ab9c9c1cf7b0c80e3ad9e2ebd9d828d955c126be4fd3e5578c9e"}, + {file = "cryptography-42.0.8-cp39-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:cafb92b2bc622cd1aa6a1dce4b93307792633f4c5fe1f46c6b97cf67073ec961"}, + {file = "cryptography-42.0.8-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:31f721658a29331f895a5a54e7e82075554ccfb8b163a18719d342f5ffe5ecb1"}, + {file = "cryptography-42.0.8-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:b297f90c5723d04bcc8265fc2a0f86d4ea2e0f7ab4b6994459548d3a6b992a14"}, + {file = "cryptography-42.0.8-cp39-abi3-win32.whl", hash = "sha256:2f88d197e66c65be5e42cd72e5c18afbfae3f741742070e3019ac8f4ac57262c"}, + {file = "cryptography-42.0.8-cp39-abi3-win_amd64.whl", hash = "sha256:fa76fbb7596cc5839320000cdd5d0955313696d9511debab7ee7278fc8b5c84a"}, + {file = "cryptography-42.0.8-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:ba4f0a211697362e89ad822e667d8d340b4d8d55fae72cdd619389fb5912eefe"}, + {file = "cryptography-42.0.8-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:81884c4d096c272f00aeb1f11cf62ccd39763581645b0812e99a91505fa48e0c"}, + {file = "cryptography-42.0.8-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:c9bb2ae11bfbab395bdd072985abde58ea9860ed84e59dbc0463a5d0159f5b71"}, + {file = "cryptography-42.0.8-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:7016f837e15b0a1c119d27ecd89b3515f01f90a8615ed5e9427e30d9cdbfed3d"}, + {file = "cryptography-42.0.8-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:5a94eccb2a81a309806027e1670a358b99b8fe8bfe9f8d329f27d72c094dde8c"}, + {file = "cryptography-42.0.8-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:dec9b018df185f08483f294cae6ccac29e7a6e0678996587363dc352dc65c842"}, + {file = "cryptography-42.0.8-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:343728aac38decfdeecf55ecab3264b015be68fc2816ca800db649607aeee648"}, + {file = "cryptography-42.0.8-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:013629ae70b40af70c9a7a5db40abe5d9054e6f4380e50ce769947b73bf3caad"}, + {file = "cryptography-42.0.8.tar.gz", hash = "sha256:8d09d05439ce7baa8e9e95b07ec5b6c886f548deb7e0f69ef25f64b3bce842f2"}, ] [package.dependencies] @@ -807,7 +838,7 @@ nox = ["nox"] pep8test = ["check-sdist", "click", "mypy", "ruff"] sdist = ["build"] ssh = ["bcrypt (>=3.1.5)"] -test = ["certifi", "cryptography-vectors (==43.0.1)", "pretend", "pytest (>=6.2.0)", "pytest-benchmark", "pytest-cov", "pytest-xdist"] +test = ["certifi", "pretend", "pytest (>=6.2.0)", "pytest-benchmark", "pytest-cov", "pytest-xdist"] test-randomorder = ["pytest-randomly"] [[package]] @@ -1214,6 +1245,43 @@ files = [ {file = "genson-1.3.0.tar.gz", hash = "sha256:e02db9ac2e3fd29e65b5286f7135762e2cd8a986537c075b06fc5f1517308e37"}, ] +[[package]] +name = "google-api" +version = "0.1.12" +description = "Google API Client" +optional = false +python-versions = "*" +files = [ + {file = "google_api-0.1.12-py2.py3-none-any.whl", hash = "sha256:618f9f2076482a128c408867b5398b291938fe8e653ed7f8ed58fce5042f0c75"}, + {file = "google_api-0.1.12.tar.gz", hash = "sha256:5611c87cdfc6b72927a5e2ea9299ddd6f3a206e29a342b86d3ff3ecc351c30a3"}, +] + +[package.dependencies] +appier = "*" + +[[package]] +name = "google-api-core" +version = "2.20.0" +description = "Google API client core library" +optional = false +python-versions = ">=3.7" +files = [ + {file = "google_api_core-2.20.0-py3-none-any.whl", hash = "sha256:ef0591ef03c30bb83f79b3d0575c3f31219001fc9c5cf37024d08310aeffed8a"}, + {file = "google_api_core-2.20.0.tar.gz", hash = "sha256:f74dff1889ba291a4b76c5079df0711810e2d9da81abfdc99957bc961c1eb28f"}, +] + +[package.dependencies] +google-auth = ">=2.14.1,<3.0.dev0" +googleapis-common-protos = ">=1.56.2,<2.0.dev0" +proto-plus = ">=1.22.3,<2.0.0dev" +protobuf = ">=3.19.5,<3.20.0 || >3.20.0,<3.20.1 || >3.20.1,<4.21.0 || >4.21.0,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<6.0.0.dev0" +requests = ">=2.18.0,<3.0.0.dev0" + +[package.extras] +grpc = ["grpcio (>=1.33.2,<2.0dev)", "grpcio (>=1.49.1,<2.0dev)", "grpcio-status (>=1.33.2,<2.0.dev0)", "grpcio-status (>=1.49.1,<2.0.dev0)"] +grpcgcp = ["grpcio-gcp (>=0.2.2,<1.0.dev0)"] +grpcio-gcp = ["grpcio-gcp (>=0.2.2,<1.0.dev0)"] + [[package]] name = "google-auth" version = "2.34.0" @@ -1336,23 +1404,6 @@ files = [ docs = ["Sphinx", "furo"] test = ["objgraph", "psutil"] -[[package]] -name = "grpc-interceptor" -version = "0.15.4" -description = "Simplifies gRPC interceptors" -optional = false -python-versions = ">=3.7,<4.0" -files = [ - {file = "grpc-interceptor-0.15.4.tar.gz", hash = "sha256:1f45c0bcb58b6f332f37c637632247c9b02bc6af0fdceb7ba7ce8d2ebbfb0926"}, - {file = "grpc_interceptor-0.15.4-py3-none-any.whl", hash = "sha256:0035f33228693ed3767ee49d937bac424318db173fef4d2d0170b3215f254d9d"}, -] - -[package.dependencies] -grpcio = ">=1.49.1,<2.0.0" - -[package.extras] -testing = ["protobuf (>=4.21.9)"] - [[package]] name = "grpcio" version = "1.65.5" @@ -1527,6 +1578,23 @@ http2 = ["h2 (>=3,<5)"] socks = ["socksio (==1.*)"] zstd = ["zstandard (>=0.18.0)"] +[[package]] +name = "httpx-ws" +version = "0.6.0" +description = "WebSockets support for HTTPX" +optional = false +python-versions = ">=3.8" +files = [ + {file = "httpx_ws-0.6.0-py3-none-any.whl", hash = "sha256:437cfca94519a4e6ae06eb5573192df6c0da85c22b1a19cc1ea0b02b05a51d25"}, + {file = "httpx_ws-0.6.0.tar.gz", hash = "sha256:60218f531fb474a2143af38568f4b7d94ba356780973443365c8e2c87882bb8c"}, +] + +[package.dependencies] +anyio = ">=4" +httpcore = ">=1.0.4" +httpx = ">=0.23.1" +wsproto = "*" + [[package]] name = "hypothesis" version = "6.111.1" @@ -1736,6 +1804,31 @@ files = [ [package.dependencies] six = "*" +[[package]] +name = "kr8s" +version = "0.17.0" +description = "A Kubernetes API library" +optional = false +python-versions = ">=3.8" +files = [ + {file = "kr8s-0.17.0-py3-none-any.whl", hash = "sha256:7307bca7b125cdc8c41ec9d7a0b3b1273c4c76b10b992a054aaf1e38309f1445"}, + {file = "kr8s-0.17.0.tar.gz", hash = "sha256:c2afe40461f1b1c853dcde755a64fe4837e05b931c6effbfff12ab32ae224445"}, +] + +[package.dependencies] +anyio = ">=3.7.0" +asyncache = ">=0.3.1" +cryptography = ">=35" +httpx = ">=0.24.1" +httpx-ws = ">=0.5.2" +python-box = ">=7.0.1" +python-jsonpath = ">=0.7.1" +pyyaml = ">=6.0" + +[package.extras] +docs = ["furo (>=2023.3.27)", "myst-parser (>=1.0.0)", "sphinx (>=5.3.0)", "sphinx-autoapi (>=2.1.0)", "sphinx-autobuild (>=2021.3.14)", "sphinx-copybutton (>=0.5.1)", "sphinx-design (>=0.3.0)", "sphinxcontrib-mermaid (>=0.8.1)"] +test = ["kubernetes (>=26.1.0)", "kubernetes-asyncio (>=24.2.3)", "kubernetes-validate (>=1.28.0)", "lightkube (>=0.13.0)", "pykube-ng (>=23.6.0)", "pytest (>=7.2.2)", "pytest-asyncio (>=0.20.3)", "pytest-cov (>=4.0.0)", "pytest-kind (>=22.11.1)", "pytest-rerunfailures (>=11.1.2)", "pytest-timeout (>=2.1.0)", "trio (>=0.22.0)", "types-pyyaml (>=6.0)"] + [[package]] name = "kubernetes" version = "31.0.0" @@ -2260,6 +2353,23 @@ files = [ prometheus-client = ">=0.7.1,<0.8.0" sanic = ">=22.0.0" +[[package]] +name = "proto-plus" +version = "1.24.0" +description = "Beautiful, Pythonic protocol buffers." +optional = false +python-versions = ">=3.7" +files = [ + {file = "proto-plus-1.24.0.tar.gz", hash = "sha256:30b72a5ecafe4406b0d339db35b56c4059064e69227b8c3bda7462397f966445"}, + {file = "proto_plus-1.24.0-py3-none-any.whl", hash = "sha256:402576830425e5f6ce4c2a6702400ac79897dab0b4343821aa5188b0fab81a12"}, +] + +[package.dependencies] +protobuf = ">=3.19.0,<6.0.0dev" + +[package.extras] +testing = ["google-api-core (>=1.31.5)"] + [[package]] name = "protobuf" version = "5.27.3" @@ -2747,6 +2857,41 @@ psutil = ["psutil (>=3.0)"] setproctitle = ["setproctitle"] testing = ["filelock"] +[[package]] +name = "python-box" +version = "7.2.0" +description = "Advanced Python dictionaries with dot notation access" +optional = false +python-versions = ">=3.8" +files = [ + {file = "python_box-7.2.0-cp310-cp310-macosx_11_0_x86_64.whl", hash = "sha256:6bdeec791e25258351388b3029a3ec5da302bb9ed3be175493c43cdc6c47f5e3"}, + {file = "python_box-7.2.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c449f7b3756a71479fa9c61a86e344ac00ed782a66d7662590f0afa294249d18"}, + {file = "python_box-7.2.0-cp310-cp310-win_amd64.whl", hash = "sha256:6b0d61f182d394106d963232854e495b51edc178faa5316a797be1178212d7e0"}, + {file = "python_box-7.2.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:e2d752de8c1204255bf7b0c814c59ef48293c187a7e9fdcd2fefa28024b72032"}, + {file = "python_box-7.2.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e8a6c35ea356a386077935958a5debcd5b229b9a1b3b26287a52dfe1a7e65d99"}, + {file = "python_box-7.2.0-cp311-cp311-win_amd64.whl", hash = "sha256:32ed58ec4d9e5475efe69f9c7d773dfea90a6a01979e776da93fd2b0a5d04429"}, + {file = "python_box-7.2.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:2a2d664c6a27f7515469b6f1e461935a2038ee130b7d194b4b4db4e85d363618"}, + {file = "python_box-7.2.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e8a5a7365db1aaf600d3e8a2747fcf6833beb5d45439a54318548f02e302e3ec"}, + {file = "python_box-7.2.0-cp312-cp312-win_amd64.whl", hash = "sha256:739f827056ea148cbea3122d4617c994e829b420b1331183d968b175304e3a4f"}, + {file = "python_box-7.2.0-cp38-cp38-macosx_11_0_x86_64.whl", hash = "sha256:2617ef3c3d199f55f63c908f540a4dc14ced9b18533a879e6171c94a6a436f23"}, + {file = "python_box-7.2.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ffd866bed03087b1d8340014da8c3aaae19135767580641df1b4ae6fff6ac0aa"}, + {file = "python_box-7.2.0-cp38-cp38-win_amd64.whl", hash = "sha256:9681f059e7e92bdf20782cd9ea6e533d4711fc7b8c57a462922a025d46add4d0"}, + {file = "python_box-7.2.0-cp39-cp39-macosx_11_0_x86_64.whl", hash = "sha256:6b59b1e2741c9ceecdf5a5bd9b90502c24650e609cd824d434fed3b6f302b7bb"}, + {file = "python_box-7.2.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e23fae825d809ae7520fdeac88bb52be55a3b63992120a00e381783669edf589"}, + {file = "python_box-7.2.0-cp39-cp39-win_amd64.whl", hash = "sha256:573b1abdcb7bd745fa404444f060ee62fc35a74f067181e55dcb43cfe92f2827"}, + {file = "python_box-7.2.0-py3-none-any.whl", hash = "sha256:a3c90832dd772cb0197fdb5bc06123b6e1b846899a1b53d9c39450d27a584829"}, + {file = "python_box-7.2.0.tar.gz", hash = "sha256:551af20bdab3a60a2a21e3435120453c4ca32f7393787c3a5036e1d9fc6a0ede"}, +] + +[package.extras] +all = ["msgpack", "ruamel.yaml (>=0.17)", "toml"] +msgpack = ["msgpack"] +pyyaml = ["PyYAML"] +ruamel-yaml = ["ruamel.yaml (>=0.17)"] +toml = ["toml"] +tomli = ["tomli", "tomli-w"] +yaml = ["ruamel.yaml (>=0.17)"] + [[package]] name = "python-dateutil" version = "2.9.0.post0" @@ -2781,6 +2926,17 @@ autocompletion = ["argcomplete (>=1.10.0,<3)"] graphql = ["gql[httpx] (>=3.5.0,<4)"] yaml = ["PyYaml (>=6.0.1)"] +[[package]] +name = "python-jsonpath" +version = "1.1.1" +description = "JSONPath, JSON Pointer and JSON Patch for Python." +optional = false +python-versions = ">=3.7" +files = [ + {file = "python_jsonpath-1.1.1-py3-none-any.whl", hash = "sha256:43f2622b7aaaf4f45dd873e80cfd181058503e08ffdeac5218135f3a97bd0aec"}, + {file = "python_jsonpath-1.1.1.tar.gz", hash = "sha256:d2944e1f7a1d6c8fa958724f9570b8f04a4e00ab6bf1e4733346ab8dcef1f74f"}, +] + [[package]] name = "python-ulid" version = "2.7.0" @@ -3122,24 +3278,24 @@ python-versions = ">=3.6" files = [ {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:b42169467c42b692c19cf539c38d4602069d8c1505e97b86387fcf7afb766e1d"}, {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-macosx_13_0_arm64.whl", hash = "sha256:07238db9cbdf8fc1e9de2489a4f68474e70dffcb32232db7c08fa61ca0c7c462"}, + {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:d92f81886165cb14d7b067ef37e142256f1c6a90a65cd156b063a43da1708cfd"}, {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:fff3573c2db359f091e1589c3d7c5fc2f86f5bdb6f24252c2d8e539d4e45f412"}, - {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-manylinux_2_24_aarch64.whl", hash = "sha256:aa2267c6a303eb483de8d02db2871afb5c5fc15618d894300b88958f729ad74f"}, {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:840f0c7f194986a63d2c2465ca63af8ccbbc90ab1c6001b1978f05119b5e7334"}, {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:024cfe1fc7c7f4e1aff4a81e718109e13409767e4f871443cbff3dba3578203d"}, {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-win32.whl", hash = "sha256:c69212f63169ec1cfc9bb44723bf2917cbbd8f6191a00ef3410f5a7fe300722d"}, {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-win_amd64.whl", hash = "sha256:cabddb8d8ead485e255fe80429f833172b4cadf99274db39abc080e068cbcc31"}, {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:bef08cd86169d9eafb3ccb0a39edb11d8e25f3dae2b28f5c52fd997521133069"}, {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-macosx_13_0_arm64.whl", hash = "sha256:b16420e621d26fdfa949a8b4b47ade8810c56002f5389970db4ddda51dbff248"}, + {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-manylinux2014_aarch64.whl", hash = "sha256:b5edda50e5e9e15e54a6a8a0070302b00c518a9d32accc2346ad6c984aacd279"}, {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:25c515e350e5b739842fc3228d662413ef28f295791af5e5110b543cf0b57d9b"}, - {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-manylinux_2_24_aarch64.whl", hash = "sha256:1707814f0d9791df063f8c19bb51b0d1278b8e9a2353abbb676c2f685dee6afe"}, {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:46d378daaac94f454b3a0e3d8d78cafd78a026b1d71443f4966c696b48a6d899"}, {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:09b055c05697b38ecacb7ac50bdab2240bfca1a0c4872b0fd309bb07dc9aa3a9"}, {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-win32.whl", hash = "sha256:53a300ed9cea38cf5a2a9b069058137c2ca1ce658a874b79baceb8f892f915a7"}, {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-win_amd64.whl", hash = "sha256:c2a72e9109ea74e511e29032f3b670835f8a59bbdc9ce692c5b4ed91ccf1eedb"}, {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:ebc06178e8821efc9692ea7544aa5644217358490145629914d8020042c24aa1"}, {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-macosx_13_0_arm64.whl", hash = "sha256:edaef1c1200c4b4cb914583150dcaa3bc30e592e907c01117c08b13a07255ec2"}, + {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-manylinux2014_aarch64.whl", hash = "sha256:7048c338b6c86627afb27faecf418768acb6331fc24cfa56c93e8c9780f815fa"}, {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d176b57452ab5b7028ac47e7b3cf644bcfdc8cacfecf7e71759f7f51a59e5c92"}, - {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-manylinux_2_24_aarch64.whl", hash = "sha256:1dc67314e7e1086c9fdf2680b7b6c2be1c0d8e3a8279f2e993ca2a7545fecf62"}, {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:3213ece08ea033eb159ac52ae052a4899b56ecc124bb80020d9bbceeb50258e9"}, {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:aab7fd643f71d7946f2ee58cc88c9b7bfc97debd71dcc93e03e2d174628e7e2d"}, {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-win32.whl", hash = "sha256:5c365d91c88390c8d0a8545df0b5857172824b1c604e867161e6b3d59a827eaa"}, @@ -3147,7 +3303,7 @@ files = [ {file = "ruamel.yaml.clib-0.2.8-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:a5aa27bad2bb83670b71683aae140a1f52b0857a2deff56ad3f6c13a017a26ed"}, {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:c58ecd827313af6864893e7af0a3bb85fd529f862b6adbefe14643947cfe2942"}, {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-macosx_12_0_arm64.whl", hash = "sha256:f481f16baec5290e45aebdc2a5168ebc6d35189ae6fea7a58787613a25f6e875"}, - {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-manylinux_2_24_aarch64.whl", hash = "sha256:77159f5d5b5c14f7c34073862a6b7d34944075d9f93e681638f6d753606c6ce6"}, + {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:3fcc54cb0c8b811ff66082de1680b4b14cf8a81dce0d4fbf665c2265a81e07a1"}, {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:7f67a1ee819dc4562d444bbafb135832b0b909f81cc90f7aa00260968c9ca1b3"}, {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:4ecbf9c3e19f9562c7fdd462e8d18dd902a47ca046a2e64dba80699f0b6c09b7"}, {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:87ea5ff66d8064301a154b3933ae406b0863402a799b16e4a1d24d9fbbcbe0d3"}, @@ -3155,7 +3311,7 @@ files = [ {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-win_amd64.whl", hash = "sha256:3f215c5daf6a9d7bbed4a0a4f760f3113b10e82ff4c5c44bec20a68c8014f675"}, {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1b617618914cb00bf5c34d4357c37aa15183fa229b24767259657746c9077615"}, {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-macosx_12_0_arm64.whl", hash = "sha256:a6a9ffd280b71ad062eae53ac1659ad86a17f59a0fdc7699fd9be40525153337"}, - {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-manylinux_2_24_aarch64.whl", hash = "sha256:305889baa4043a09e5b76f8e2a51d4ffba44259f6b4c72dec8ca56207d9c6fe1"}, + {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:665f58bfd29b167039f714c6998178d27ccd83984084c286110ef26b230f259f"}, {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:700e4ebb569e59e16a976857c8798aee258dceac7c7d6b50cab63e080058df91"}, {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:e2b4c44b60eadec492926a7270abb100ef9f72798e18743939bdbf037aab8c28"}, {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:e79e5db08739731b0ce4850bed599235d601701d5694c36570a99a0c5ca41a9d"}, @@ -3163,7 +3319,7 @@ files = [ {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-win_amd64.whl", hash = "sha256:56f4252222c067b4ce51ae12cbac231bce32aee1d33fbfc9d17e5b8d6966c312"}, {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:03d1162b6d1df1caa3a4bd27aa51ce17c9afc2046c31b0ad60a0a96ec22f8001"}, {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:bba64af9fa9cebe325a62fa398760f5c7206b215201b0ec825005f1b18b9bccf"}, - {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-manylinux_2_24_aarch64.whl", hash = "sha256:a1a45e0bb052edf6a1d3a93baef85319733a888363938e1fc9924cb00c8df24c"}, + {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:9eb5dee2772b0f704ca2e45b1713e4e5198c18f515b52743576d196348f374d3"}, {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:da09ad1c359a728e112d60116f626cc9f29730ff3e0e7db72b9a2dbc2e4beed5"}, {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:184565012b60405d93838167f425713180b949e9d8dd0bbc7b49f074407c5a8b"}, {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a75879bacf2c987c003368cf14bed0ffe99e8e85acfa6c0bfffc21a090f16880"}, @@ -3558,19 +3714,30 @@ pbr = ">=2.0.0,<2.1.0 || >2.1.0" [[package]] name = "tenacity" -version = "9.0.0" +version = "8.5.0" description = "Retry code until it succeeds" optional = false python-versions = ">=3.8" files = [ - {file = "tenacity-9.0.0-py3-none-any.whl", hash = "sha256:93de0c98785b27fcf659856aa9f54bfbd399e29969b0621bc7f762bd441b4539"}, - {file = "tenacity-9.0.0.tar.gz", hash = "sha256:807f37ca97d62aa361264d497b0e31e92b8027044942bfa756160d908320d73b"}, + {file = "tenacity-8.5.0-py3-none-any.whl", hash = "sha256:b594c2a5945830c267ce6b79a166228323ed52718f30302c1359836112346687"}, + {file = "tenacity-8.5.0.tar.gz", hash = "sha256:8bc6c0c8a09b31e6cad13c47afbed1a567518250a9a171418582ed8d9c20ca78"}, ] [package.extras] doc = ["reno", "sphinx"] test = ["pytest", "tornado (>=4.5)", "typeguard"] +[[package]] +name = "toml" +version = "0.10.2" +description = "Python Library for Tom's Obvious, Minimal Language" +optional = false +python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*" +files = [ + {file = "toml-0.10.2-py2.py3-none-any.whl", hash = "sha256:806143ae5bfb6a3c6e736a764057db0e6a0e05e338b5630894a5f779cabb4f9b"}, + {file = "toml-0.10.2.tar.gz", hash = "sha256:b3bda1d108d5dd99f4a20d24d9c348e91c4db7ab1b749200bded2f839ccbe68f"}, +] + [[package]] name = "tomli" version = "2.0.1" @@ -3698,6 +3865,17 @@ files = [ {file = "types_setuptools-72.2.0.20240821-py3-none-any.whl", hash = "sha256:260e89d6d3b42cc35f9f0f382d030713b7b547344a664c05c9175e6ba124fac7"}, ] +[[package]] +name = "types-toml" +version = "0.10.8.20240310" +description = "Typing stubs for toml" +optional = false +python-versions = ">=3.8" +files = [ + {file = "types-toml-0.10.8.20240310.tar.gz", hash = "sha256:3d41501302972436a6b8b239c850b26689657e25281b48ff0ec06345b8830331"}, + {file = "types_toml-0.10.8.20240310-py3-none-any.whl", hash = "sha256:627b47775d25fa29977d9c70dc0cbab3f314f32c8d8d0c012f2ef5de7aaec05d"}, +] + [[package]] name = "types-urllib3" version = "1.26.25.14" @@ -4038,6 +4216,20 @@ MarkupSafe = ">=2.1.1" [package.extras] watchdog = ["watchdog (>=2.3)"] +[[package]] +name = "wsproto" +version = "1.2.0" +description = "WebSockets state-machine based protocol implementation" +optional = false +python-versions = ">=3.7.0" +files = [ + {file = "wsproto-1.2.0-py3-none-any.whl", hash = "sha256:b9acddd652b585d75b20477888c56642fdade28bdfd3579aa24a4d2c037dd736"}, + {file = "wsproto-1.2.0.tar.gz", hash = "sha256:ad565f26ecb92588a3e43bc3d96164de84cd9902482b130d0ddbaa9664a85065"}, +] + +[package.dependencies] +h11 = ">=0.9.0,<1" + [[package]] name = "yarl" version = "1.9.4" @@ -4144,4 +4336,4 @@ multidict = ">=4.0" [metadata] lock-version = "2.0" python-versions = "^3.12" -content-hash = "a44608066433ede3cf9875cebac61b84b8f50a56a0455a7ed774f651d1bfe065" +content-hash = "4997e564fe033ab0bc21d87e37a369d25dce32ed76950522f5170f17490c0227" diff --git a/projects/background_jobs/poetry.lock b/projects/background_jobs/poetry.lock index c485b846c..87d5d7b6a 100644 --- a/projects/background_jobs/poetry.lock +++ b/projects/background_jobs/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.8.2 and should not be changed by hand. [[package]] name = "aiofile" @@ -78,6 +78,20 @@ doc = ["Sphinx (>=7)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphin test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (>=0.17)"] trio = ["trio (>=0.23)"] +[[package]] +name = "asyncache" +version = "0.3.1" +description = "Helpers to use cachetools with async code." +optional = false +python-versions = ">=3.8,<4.0" +files = [ + {file = "asyncache-0.3.1-py3-none-any.whl", hash = "sha256:ef20a1024d265090dd1e0785c961cf98b9c32cc7d9478973dcf25ac1b80011f5"}, + {file = "asyncache-0.3.1.tar.gz", hash = "sha256:9a1e60a75668e794657489bdea6540ee7e3259c483517b934670db7600bf5035"}, +] + +[package.dependencies] +cachetools = ">=5.2.0,<6.0.0" + [[package]] name = "asyncpg" version = "0.29.0" @@ -548,6 +562,17 @@ files = [ dnspython = ">=2.0.0" idna = ">=2.0.0" +[[package]] +name = "escapism" +version = "1.0.1" +description = "Simple, generic API for escaping strings." +optional = false +python-versions = "*" +files = [ + {file = "escapism-1.0.1-py2.py3-none-any.whl", hash = "sha256:d28f19edc3cb1ffc36fa238956ecc068695477e748f57157c6dde00a6b77f229"}, + {file = "escapism-1.0.1.tar.gz", hash = "sha256:73256bdfb4f22230f0428fc6efecee61cdc4fad531b6f98b849cb9c80711e4ec"}, +] + [[package]] name = "factory-boy" version = "3.3.0" @@ -950,6 +975,23 @@ http2 = ["h2 (>=3,<5)"] socks = ["socksio (==1.*)"] zstd = ["zstandard (>=0.18.0)"] +[[package]] +name = "httpx-ws" +version = "0.6.0" +description = "WebSockets support for HTTPX" +optional = false +python-versions = ">=3.8" +files = [ + {file = "httpx_ws-0.6.0-py3-none-any.whl", hash = "sha256:437cfca94519a4e6ae06eb5573192df6c0da85c22b1a19cc1ea0b02b05a51d25"}, + {file = "httpx_ws-0.6.0.tar.gz", hash = "sha256:60218f531fb474a2143af38568f4b7d94ba356780973443365c8e2c87882bb8c"}, +] + +[package.dependencies] +anyio = ">=4" +httpcore = ">=1.0.4" +httpx = ">=0.23.1" +wsproto = "*" + [[package]] name = "idna" version = "3.7" @@ -997,6 +1039,31 @@ files = [ {file = "json5-0.9.25.tar.gz", hash = "sha256:548e41b9be043f9426776f05df8635a00fe06104ea51ed24b67f908856e151ae"}, ] +[[package]] +name = "kr8s" +version = "0.17.2" +description = "A Kubernetes API library" +optional = false +python-versions = ">=3.8" +files = [ + {file = "kr8s-0.17.2-py3-none-any.whl", hash = "sha256:5beba0ccf08c7a2305c0fa8f85fa8d2fe7d3f265872f718489e1bea3162fa91b"}, + {file = "kr8s-0.17.2.tar.gz", hash = "sha256:536d08c3f701365e6ac5ce42c0e8313aa6e6740f92b7077f28209e892af046ab"}, +] + +[package.dependencies] +anyio = ">=3.7.0" +asyncache = ">=0.3.1" +cryptography = ">=35" +httpx = ">=0.24.1" +httpx-ws = ">=0.5.2" +python-box = ">=7.0.1" +python-jsonpath = ">=0.7.1" +pyyaml = ">=6.0" + +[package.extras] +docs = ["furo (>=2023.3.27)", "myst-parser (>=1.0.0)", "sphinx (>=5.3.0)", "sphinx-autoapi (>=2.1.0)", "sphinx-autobuild (>=2021.3.14)", "sphinx-copybutton (>=0.5.1)", "sphinx-design (>=0.3.0)", "sphinxcontrib-mermaid (>=0.8.1)"] +test = ["kubernetes (>=26.1.0)", "kubernetes-asyncio (>=24.2.3)", "kubernetes-validate (>=1.28.0)", "lightkube (>=0.13.0)", "pykube-ng (>=23.6.0)", "pytest (>=7.2.2)", "pytest-asyncio (>=0.20.3)", "pytest-cov (>=4.0.0)", "pytest-kind (>=22.11.1)", "pytest-rerunfailures (>=11.1.2)", "pytest-timeout (>=2.1.0)", "trio (>=0.22.0)", "types-pyyaml (>=6.0)"] + [[package]] name = "kubernetes" version = "31.0.0" @@ -1112,6 +1179,25 @@ files = [ {file = "MarkupSafe-2.1.5.tar.gz", hash = "sha256:d283d37a890ba4c1ae73ffadf8046435c76e7bc2247bbb63c00bd1a709c6544b"}, ] +[[package]] +name = "marshmallow" +version = "3.22.0" +description = "A lightweight library for converting complex datatypes to and from native Python datatypes." +optional = false +python-versions = ">=3.8" +files = [ + {file = "marshmallow-3.22.0-py3-none-any.whl", hash = "sha256:71a2dce49ef901c3f97ed296ae5051135fd3febd2bf43afe0ae9a82143a494d9"}, + {file = "marshmallow-3.22.0.tar.gz", hash = "sha256:4972f529104a220bb8637d595aa4c9762afbe7f7a77d82dc58c1615d70c5823e"}, +] + +[package.dependencies] +packaging = ">=17.0" + +[package.extras] +dev = ["marshmallow[tests]", "pre-commit (>=3.5,<4.0)", "tox"] +docs = ["alabaster (==1.0.0)", "autodocsumm (==0.2.13)", "sphinx (==8.0.2)", "sphinx-issues (==4.1.0)", "sphinx-version-warning (==1.1.2)"] +tests = ["pytest", "pytz", "simplejson"] + [[package]] name = "multidict" version = "6.0.5" @@ -1245,6 +1331,17 @@ rsa = ["cryptography (>=3.0.0)"] signals = ["blinker (>=1.4.0)"] signedtoken = ["cryptography (>=3.0.0)", "pyjwt (>=2.0.0,<3)"] +[[package]] +name = "packaging" +version = "24.1" +description = "Core utilities for Python packages" +optional = false +python-versions = ">=3.8" +files = [ + {file = "packaging-24.1-py3-none-any.whl", hash = "sha256:5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124"}, + {file = "packaging-24.1.tar.gz", hash = "sha256:026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002"}, +] + [[package]] name = "prometheus-client" version = "0.7.1" @@ -1607,6 +1704,41 @@ dev = ["coverage[toml] (==5.0.4)", "cryptography (>=3.4.0)", "pre-commit", "pyte docs = ["sphinx", "sphinx-rtd-theme", "zope.interface"] tests = ["coverage[toml] (==5.0.4)", "pytest (>=6.0.0,<7.0.0)"] +[[package]] +name = "python-box" +version = "7.2.0" +description = "Advanced Python dictionaries with dot notation access" +optional = false +python-versions = ">=3.8" +files = [ + {file = "python_box-7.2.0-cp310-cp310-macosx_11_0_x86_64.whl", hash = "sha256:6bdeec791e25258351388b3029a3ec5da302bb9ed3be175493c43cdc6c47f5e3"}, + {file = "python_box-7.2.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c449f7b3756a71479fa9c61a86e344ac00ed782a66d7662590f0afa294249d18"}, + {file = "python_box-7.2.0-cp310-cp310-win_amd64.whl", hash = "sha256:6b0d61f182d394106d963232854e495b51edc178faa5316a797be1178212d7e0"}, + {file = "python_box-7.2.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:e2d752de8c1204255bf7b0c814c59ef48293c187a7e9fdcd2fefa28024b72032"}, + {file = "python_box-7.2.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e8a6c35ea356a386077935958a5debcd5b229b9a1b3b26287a52dfe1a7e65d99"}, + {file = "python_box-7.2.0-cp311-cp311-win_amd64.whl", hash = "sha256:32ed58ec4d9e5475efe69f9c7d773dfea90a6a01979e776da93fd2b0a5d04429"}, + {file = "python_box-7.2.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:2a2d664c6a27f7515469b6f1e461935a2038ee130b7d194b4b4db4e85d363618"}, + {file = "python_box-7.2.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e8a5a7365db1aaf600d3e8a2747fcf6833beb5d45439a54318548f02e302e3ec"}, + {file = "python_box-7.2.0-cp312-cp312-win_amd64.whl", hash = "sha256:739f827056ea148cbea3122d4617c994e829b420b1331183d968b175304e3a4f"}, + {file = "python_box-7.2.0-cp38-cp38-macosx_11_0_x86_64.whl", hash = "sha256:2617ef3c3d199f55f63c908f540a4dc14ced9b18533a879e6171c94a6a436f23"}, + {file = "python_box-7.2.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ffd866bed03087b1d8340014da8c3aaae19135767580641df1b4ae6fff6ac0aa"}, + {file = "python_box-7.2.0-cp38-cp38-win_amd64.whl", hash = "sha256:9681f059e7e92bdf20782cd9ea6e533d4711fc7b8c57a462922a025d46add4d0"}, + {file = "python_box-7.2.0-cp39-cp39-macosx_11_0_x86_64.whl", hash = "sha256:6b59b1e2741c9ceecdf5a5bd9b90502c24650e609cd824d434fed3b6f302b7bb"}, + {file = "python_box-7.2.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e23fae825d809ae7520fdeac88bb52be55a3b63992120a00e381783669edf589"}, + {file = "python_box-7.2.0-cp39-cp39-win_amd64.whl", hash = "sha256:573b1abdcb7bd745fa404444f060ee62fc35a74f067181e55dcb43cfe92f2827"}, + {file = "python_box-7.2.0-py3-none-any.whl", hash = "sha256:a3c90832dd772cb0197fdb5bc06123b6e1b846899a1b53d9c39450d27a584829"}, + {file = "python_box-7.2.0.tar.gz", hash = "sha256:551af20bdab3a60a2a21e3435120453c4ca32f7393787c3a5036e1d9fc6a0ede"}, +] + +[package.extras] +all = ["msgpack", "ruamel.yaml (>=0.17)", "toml"] +msgpack = ["msgpack"] +pyyaml = ["PyYAML"] +ruamel-yaml = ["ruamel.yaml (>=0.17)"] +toml = ["toml"] +tomli = ["tomli", "tomli-w"] +yaml = ["ruamel.yaml (>=0.17)"] + [[package]] name = "python-dateutil" version = "2.9.0.post0" @@ -1641,6 +1773,17 @@ autocompletion = ["argcomplete (>=1.10.0,<3)"] graphql = ["gql[httpx] (>=3.5.0,<4)"] yaml = ["PyYaml (>=6.0.1)"] +[[package]] +name = "python-jsonpath" +version = "1.2.0" +description = "JSONPath, JSON Pointer and JSON Patch for Python." +optional = false +python-versions = ">=3.7" +files = [ + {file = "python_jsonpath-1.2.0-py3-none-any.whl", hash = "sha256:3172c7b87098fced1ed84bd3492bd1a19ef1ad41d4f5b8a3e9a147c750ac08b3"}, + {file = "python_jsonpath-1.2.0.tar.gz", hash = "sha256:a29a84ec3ac38e5dcaa62ac2a215de72c4eb60cb1303e10700da980cf7873775"}, +] + [[package]] name = "python-ulid" version = "2.7.0" @@ -1678,7 +1821,6 @@ files = [ {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938"}, {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d"}, {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515"}, - {file = "PyYAML-6.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:326c013efe8048858a6d312ddd31d56e468118ad4cdeda36c719bf5bb6192290"}, {file = "PyYAML-6.0.1-cp310-cp310-win32.whl", hash = "sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924"}, {file = "PyYAML-6.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d"}, {file = "PyYAML-6.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007"}, @@ -1686,16 +1828,8 @@ files = [ {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d"}, {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc"}, {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673"}, - {file = "PyYAML-6.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e7d73685e87afe9f3b36c799222440d6cf362062f78be1013661b00c5c6f678b"}, {file = "PyYAML-6.0.1-cp311-cp311-win32.whl", hash = "sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741"}, {file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"}, - {file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"}, - {file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"}, - {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a08c6f0fe150303c1c6b71ebcd7213c2858041a7e01975da3a99aed1e7a378ef"}, - {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"}, - {file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"}, - {file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"}, - {file = "PyYAML-6.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:0d3304d8c0adc42be59c5f8a4d9e3d7379e6955ad754aa9d6ab7a398b59dd1df"}, {file = "PyYAML-6.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47"}, {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98"}, {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c"}, @@ -1712,7 +1846,6 @@ files = [ {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5"}, {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696"}, {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735"}, - {file = "PyYAML-6.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:49a183be227561de579b4a36efbb21b3eab9651dd81b1858589f796549873dd6"}, {file = "PyYAML-6.0.1-cp38-cp38-win32.whl", hash = "sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206"}, {file = "PyYAML-6.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62"}, {file = "PyYAML-6.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8"}, @@ -1720,7 +1853,6 @@ files = [ {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6"}, {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0"}, {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c"}, - {file = "PyYAML-6.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:04ac92ad1925b2cff1db0cfebffb6ffc43457495c9b3c39d3fcae417d7125dc5"}, {file = "PyYAML-6.0.1-cp39-cp39-win32.whl", hash = "sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c"}, {file = "PyYAML-6.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486"}, {file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"}, @@ -1835,24 +1967,24 @@ python-versions = ">=3.6" files = [ {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:b42169467c42b692c19cf539c38d4602069d8c1505e97b86387fcf7afb766e1d"}, {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-macosx_13_0_arm64.whl", hash = "sha256:07238db9cbdf8fc1e9de2489a4f68474e70dffcb32232db7c08fa61ca0c7c462"}, + {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:d92f81886165cb14d7b067ef37e142256f1c6a90a65cd156b063a43da1708cfd"}, {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:fff3573c2db359f091e1589c3d7c5fc2f86f5bdb6f24252c2d8e539d4e45f412"}, - {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-manylinux_2_24_aarch64.whl", hash = "sha256:aa2267c6a303eb483de8d02db2871afb5c5fc15618d894300b88958f729ad74f"}, {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:840f0c7f194986a63d2c2465ca63af8ccbbc90ab1c6001b1978f05119b5e7334"}, {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:024cfe1fc7c7f4e1aff4a81e718109e13409767e4f871443cbff3dba3578203d"}, {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-win32.whl", hash = "sha256:c69212f63169ec1cfc9bb44723bf2917cbbd8f6191a00ef3410f5a7fe300722d"}, {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-win_amd64.whl", hash = "sha256:cabddb8d8ead485e255fe80429f833172b4cadf99274db39abc080e068cbcc31"}, {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:bef08cd86169d9eafb3ccb0a39edb11d8e25f3dae2b28f5c52fd997521133069"}, {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-macosx_13_0_arm64.whl", hash = "sha256:b16420e621d26fdfa949a8b4b47ade8810c56002f5389970db4ddda51dbff248"}, + {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-manylinux2014_aarch64.whl", hash = "sha256:b5edda50e5e9e15e54a6a8a0070302b00c518a9d32accc2346ad6c984aacd279"}, {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:25c515e350e5b739842fc3228d662413ef28f295791af5e5110b543cf0b57d9b"}, - {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-manylinux_2_24_aarch64.whl", hash = "sha256:1707814f0d9791df063f8c19bb51b0d1278b8e9a2353abbb676c2f685dee6afe"}, {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:46d378daaac94f454b3a0e3d8d78cafd78a026b1d71443f4966c696b48a6d899"}, {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:09b055c05697b38ecacb7ac50bdab2240bfca1a0c4872b0fd309bb07dc9aa3a9"}, {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-win32.whl", hash = "sha256:53a300ed9cea38cf5a2a9b069058137c2ca1ce658a874b79baceb8f892f915a7"}, {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-win_amd64.whl", hash = "sha256:c2a72e9109ea74e511e29032f3b670835f8a59bbdc9ce692c5b4ed91ccf1eedb"}, {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:ebc06178e8821efc9692ea7544aa5644217358490145629914d8020042c24aa1"}, {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-macosx_13_0_arm64.whl", hash = "sha256:edaef1c1200c4b4cb914583150dcaa3bc30e592e907c01117c08b13a07255ec2"}, + {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-manylinux2014_aarch64.whl", hash = "sha256:7048c338b6c86627afb27faecf418768acb6331fc24cfa56c93e8c9780f815fa"}, {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d176b57452ab5b7028ac47e7b3cf644bcfdc8cacfecf7e71759f7f51a59e5c92"}, - {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-manylinux_2_24_aarch64.whl", hash = "sha256:1dc67314e7e1086c9fdf2680b7b6c2be1c0d8e3a8279f2e993ca2a7545fecf62"}, {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:3213ece08ea033eb159ac52ae052a4899b56ecc124bb80020d9bbceeb50258e9"}, {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:aab7fd643f71d7946f2ee58cc88c9b7bfc97debd71dcc93e03e2d174628e7e2d"}, {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-win32.whl", hash = "sha256:5c365d91c88390c8d0a8545df0b5857172824b1c604e867161e6b3d59a827eaa"}, @@ -1860,7 +1992,7 @@ files = [ {file = "ruamel.yaml.clib-0.2.8-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:a5aa27bad2bb83670b71683aae140a1f52b0857a2deff56ad3f6c13a017a26ed"}, {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:c58ecd827313af6864893e7af0a3bb85fd529f862b6adbefe14643947cfe2942"}, {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-macosx_12_0_arm64.whl", hash = "sha256:f481f16baec5290e45aebdc2a5168ebc6d35189ae6fea7a58787613a25f6e875"}, - {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-manylinux_2_24_aarch64.whl", hash = "sha256:77159f5d5b5c14f7c34073862a6b7d34944075d9f93e681638f6d753606c6ce6"}, + {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:3fcc54cb0c8b811ff66082de1680b4b14cf8a81dce0d4fbf665c2265a81e07a1"}, {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:7f67a1ee819dc4562d444bbafb135832b0b909f81cc90f7aa00260968c9ca1b3"}, {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:4ecbf9c3e19f9562c7fdd462e8d18dd902a47ca046a2e64dba80699f0b6c09b7"}, {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:87ea5ff66d8064301a154b3933ae406b0863402a799b16e4a1d24d9fbbcbe0d3"}, @@ -1868,7 +2000,7 @@ files = [ {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-win_amd64.whl", hash = "sha256:3f215c5daf6a9d7bbed4a0a4f760f3113b10e82ff4c5c44bec20a68c8014f675"}, {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1b617618914cb00bf5c34d4357c37aa15183fa229b24767259657746c9077615"}, {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-macosx_12_0_arm64.whl", hash = "sha256:a6a9ffd280b71ad062eae53ac1659ad86a17f59a0fdc7699fd9be40525153337"}, - {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-manylinux_2_24_aarch64.whl", hash = "sha256:305889baa4043a09e5b76f8e2a51d4ffba44259f6b4c72dec8ca56207d9c6fe1"}, + {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:665f58bfd29b167039f714c6998178d27ccd83984084c286110ef26b230f259f"}, {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:700e4ebb569e59e16a976857c8798aee258dceac7c7d6b50cab63e080058df91"}, {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:e2b4c44b60eadec492926a7270abb100ef9f72798e18743939bdbf037aab8c28"}, {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:e79e5db08739731b0ce4850bed599235d601701d5694c36570a99a0c5ca41a9d"}, @@ -1876,7 +2008,7 @@ files = [ {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-win_amd64.whl", hash = "sha256:56f4252222c067b4ce51ae12cbac231bce32aee1d33fbfc9d17e5b8d6966c312"}, {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:03d1162b6d1df1caa3a4bd27aa51ce17c9afc2046c31b0ad60a0a96ec22f8001"}, {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:bba64af9fa9cebe325a62fa398760f5c7206b215201b0ec825005f1b18b9bccf"}, - {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-manylinux_2_24_aarch64.whl", hash = "sha256:a1a45e0bb052edf6a1d3a93baef85319733a888363938e1fc9924cb00c8df24c"}, + {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:9eb5dee2772b0f704ca2e45b1713e4e5198c18f515b52743576d196348f374d3"}, {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:da09ad1c359a728e112d60116f626cc9f29730ff3e0e7db72b9a2dbc2e4beed5"}, {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:184565012b60405d93838167f425713180b949e9d8dd0bbc7b49f074407c5a8b"}, {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a75879bacf2c987c003368cf14bed0ffe99e8e85acfa6c0bfffc21a090f16880"}, @@ -2154,6 +2286,17 @@ files = [ doc = ["reno", "sphinx"] test = ["pytest", "tornado (>=4.5)", "typeguard"] +[[package]] +name = "toml" +version = "0.10.2" +description = "Python Library for Tom's Obvious, Minimal Language" +optional = false +python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*" +files = [ + {file = "toml-0.10.2-py2.py3-none-any.whl", hash = "sha256:806143ae5bfb6a3c6e736a764057db0e6a0e05e338b5630894a5f779cabb4f9b"}, + {file = "toml-0.10.2.tar.gz", hash = "sha256:b3bda1d108d5dd99f4a20d24d9c348e91c4db7ab1b749200bded2f839ccbe68f"}, +] + [[package]] name = "tracerite" version = "1.1.1" @@ -2446,6 +2589,37 @@ files = [ {file = "websockets-12.0.tar.gz", hash = "sha256:81df9cbcbb6c260de1e007e58c011bfebe2dafc8435107b0537f393dd38c8b1b"}, ] +[[package]] +name = "werkzeug" +version = "3.0.4" +description = "The comprehensive WSGI web application library." +optional = false +python-versions = ">=3.8" +files = [ + {file = "werkzeug-3.0.4-py3-none-any.whl", hash = "sha256:02c9eb92b7d6c06f31a782811505d2157837cea66aaede3e217c7c27c039476c"}, + {file = "werkzeug-3.0.4.tar.gz", hash = "sha256:34f2371506b250df4d4f84bfe7b0921e4762525762bbd936614909fe25cd7306"}, +] + +[package.dependencies] +MarkupSafe = ">=2.1.1" + +[package.extras] +watchdog = ["watchdog (>=2.3)"] + +[[package]] +name = "wsproto" +version = "1.2.0" +description = "WebSockets state-machine based protocol implementation" +optional = false +python-versions = ">=3.7.0" +files = [ + {file = "wsproto-1.2.0-py3-none-any.whl", hash = "sha256:b9acddd652b585d75b20477888c56642fdade28bdfd3579aa24a4d2c037dd736"}, + {file = "wsproto-1.2.0.tar.gz", hash = "sha256:ad565f26ecb92588a3e43bc3d96164de84cd9902482b130d0ddbaa9664a85065"}, +] + +[package.dependencies] +h11 = ">=0.9.0,<1" + [metadata] lock-version = "2.0" python-versions = "^3.12" diff --git a/projects/background_jobs/pyproject.toml b/projects/background_jobs/pyproject.toml index cb13e197f..99a184755 100644 --- a/projects/background_jobs/pyproject.toml +++ b/projects/background_jobs/pyproject.toml @@ -33,6 +33,7 @@ packages = [ { include = "renku_data_services/session", from = "../../components" }, { include = "renku_data_services/platform", from = "../../components" }, { include = "renku_data_services/migrations", from = "../../components" }, + { include = "renku_data_services/notebooks", from = "../../components" }, ] [tool.poetry.dependencies] @@ -62,6 +63,11 @@ sentry-sdk = { version = "^2.14.0", extras = ["sanic"] } # see https://github.com/sanic-org/sanic/issues/2828 for setuptools dependency, remove when not needed anymore setuptools = { version = "^75.1.0" } aiofile = "^3.8.8" +escapism = "^1.0.1" +kr8s = "^0.17.2" +marshmallow = "^3.22.0" +toml = "^0.10.2" +werkzeug = "^3.0.4" [tool.poetry.group.dev.dependencies] pyavro-gen = "^0.3.3" diff --git a/projects/renku_data_service/poetry.lock b/projects/renku_data_service/poetry.lock index 11deb5193..994b0d5bf 100644 --- a/projects/renku_data_service/poetry.lock +++ b/projects/renku_data_service/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.8.2 and should not be changed by hand. [[package]] name = "aiofile" @@ -28,6 +28,115 @@ files = [ {file = "aiofiles-24.1.0.tar.gz", hash = "sha256:22a075c9e5a3810f0c2e48f3008c94d68c65d763b9b03857924c99e57355166c"}, ] +[[package]] +name = "aiohttp" +version = "3.9.5" +description = "Async http client/server framework (asyncio)" +optional = false +python-versions = ">=3.8" +files = [ + {file = "aiohttp-3.9.5-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:fcde4c397f673fdec23e6b05ebf8d4751314fa7c24f93334bf1f1364c1c69ac7"}, + {file = "aiohttp-3.9.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:5d6b3f1fabe465e819aed2c421a6743d8debbde79b6a8600739300630a01bf2c"}, + {file = "aiohttp-3.9.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6ae79c1bc12c34082d92bf9422764f799aee4746fd7a392db46b7fd357d4a17a"}, + {file = "aiohttp-3.9.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4d3ebb9e1316ec74277d19c5f482f98cc65a73ccd5430540d6d11682cd857430"}, + {file = "aiohttp-3.9.5-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:84dabd95154f43a2ea80deffec9cb44d2e301e38a0c9d331cc4aa0166fe28ae3"}, + {file = "aiohttp-3.9.5-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c8a02fbeca6f63cb1f0475c799679057fc9268b77075ab7cf3f1c600e81dd46b"}, + {file = "aiohttp-3.9.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c26959ca7b75ff768e2776d8055bf9582a6267e24556bb7f7bd29e677932be72"}, + {file = "aiohttp-3.9.5-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:714d4e5231fed4ba2762ed489b4aec07b2b9953cf4ee31e9871caac895a839c0"}, + {file = "aiohttp-3.9.5-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e7a6a8354f1b62e15d48e04350f13e726fa08b62c3d7b8401c0a1314f02e3558"}, + {file = "aiohttp-3.9.5-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:c413016880e03e69d166efb5a1a95d40f83d5a3a648d16486592c49ffb76d0db"}, + {file = "aiohttp-3.9.5-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:ff84aeb864e0fac81f676be9f4685f0527b660f1efdc40dcede3c251ef1e867f"}, + {file = "aiohttp-3.9.5-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:ad7f2919d7dac062f24d6f5fe95d401597fbb015a25771f85e692d043c9d7832"}, + {file = "aiohttp-3.9.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:702e2c7c187c1a498a4e2b03155d52658fdd6fda882d3d7fbb891a5cf108bb10"}, + {file = "aiohttp-3.9.5-cp310-cp310-win32.whl", hash = "sha256:67c3119f5ddc7261d47163ed86d760ddf0e625cd6246b4ed852e82159617b5fb"}, + {file = "aiohttp-3.9.5-cp310-cp310-win_amd64.whl", hash = "sha256:471f0ef53ccedec9995287f02caf0c068732f026455f07db3f01a46e49d76bbb"}, + {file = "aiohttp-3.9.5-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:e0ae53e33ee7476dd3d1132f932eeb39bf6125083820049d06edcdca4381f342"}, + {file = "aiohttp-3.9.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c088c4d70d21f8ca5c0b8b5403fe84a7bc8e024161febdd4ef04575ef35d474d"}, + {file = "aiohttp-3.9.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:639d0042b7670222f33b0028de6b4e2fad6451462ce7df2af8aee37dcac55424"}, + {file = "aiohttp-3.9.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f26383adb94da5e7fb388d441bf09c61e5e35f455a3217bfd790c6b6bc64b2ee"}, + {file = "aiohttp-3.9.5-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:66331d00fb28dc90aa606d9a54304af76b335ae204d1836f65797d6fe27f1ca2"}, + {file = "aiohttp-3.9.5-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4ff550491f5492ab5ed3533e76b8567f4b37bd2995e780a1f46bca2024223233"}, + {file = "aiohttp-3.9.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f22eb3a6c1080d862befa0a89c380b4dafce29dc6cd56083f630073d102eb595"}, + {file = "aiohttp-3.9.5-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a81b1143d42b66ffc40a441379387076243ef7b51019204fd3ec36b9f69e77d6"}, + {file = "aiohttp-3.9.5-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:f64fd07515dad67f24b6ea4a66ae2876c01031de91c93075b8093f07c0a2d93d"}, + {file = "aiohttp-3.9.5-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:93e22add827447d2e26d67c9ac0161756007f152fdc5210277d00a85f6c92323"}, + {file = "aiohttp-3.9.5-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:55b39c8684a46e56ef8c8d24faf02de4a2b2ac60d26cee93bc595651ff545de9"}, + {file = "aiohttp-3.9.5-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:4715a9b778f4293b9f8ae7a0a7cef9829f02ff8d6277a39d7f40565c737d3771"}, + {file = "aiohttp-3.9.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:afc52b8d969eff14e069a710057d15ab9ac17cd4b6753042c407dcea0e40bf75"}, + {file = "aiohttp-3.9.5-cp311-cp311-win32.whl", hash = "sha256:b3df71da99c98534be076196791adca8819761f0bf6e08e07fd7da25127150d6"}, + {file = "aiohttp-3.9.5-cp311-cp311-win_amd64.whl", hash = "sha256:88e311d98cc0bf45b62fc46c66753a83445f5ab20038bcc1b8a1cc05666f428a"}, + {file = "aiohttp-3.9.5-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:c7a4b7a6cf5b6eb11e109a9755fd4fda7d57395f8c575e166d363b9fc3ec4678"}, + {file = "aiohttp-3.9.5-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:0a158704edf0abcac8ac371fbb54044f3270bdbc93e254a82b6c82be1ef08f3c"}, + {file = "aiohttp-3.9.5-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:d153f652a687a8e95ad367a86a61e8d53d528b0530ef382ec5aaf533140ed00f"}, + {file = "aiohttp-3.9.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:82a6a97d9771cb48ae16979c3a3a9a18b600a8505b1115cfe354dfb2054468b4"}, + {file = "aiohttp-3.9.5-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:60cdbd56f4cad9f69c35eaac0fbbdf1f77b0ff9456cebd4902f3dd1cf096464c"}, + {file = "aiohttp-3.9.5-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8676e8fd73141ded15ea586de0b7cda1542960a7b9ad89b2b06428e97125d4fa"}, + {file = "aiohttp-3.9.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:da00da442a0e31f1c69d26d224e1efd3a1ca5bcbf210978a2ca7426dfcae9f58"}, + {file = "aiohttp-3.9.5-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:18f634d540dd099c262e9f887c8bbacc959847cfe5da7a0e2e1cf3f14dbf2daf"}, + {file = "aiohttp-3.9.5-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:320e8618eda64e19d11bdb3bd04ccc0a816c17eaecb7e4945d01deee2a22f95f"}, + {file = "aiohttp-3.9.5-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:2faa61a904b83142747fc6a6d7ad8fccff898c849123030f8e75d5d967fd4a81"}, + {file = "aiohttp-3.9.5-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:8c64a6dc3fe5db7b1b4d2b5cb84c4f677768bdc340611eca673afb7cf416ef5a"}, + {file = "aiohttp-3.9.5-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:393c7aba2b55559ef7ab791c94b44f7482a07bf7640d17b341b79081f5e5cd1a"}, + {file = "aiohttp-3.9.5-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:c671dc117c2c21a1ca10c116cfcd6e3e44da7fcde37bf83b2be485ab377b25da"}, + {file = "aiohttp-3.9.5-cp312-cp312-win32.whl", hash = "sha256:5a7ee16aab26e76add4afc45e8f8206c95d1d75540f1039b84a03c3b3800dd59"}, + {file = "aiohttp-3.9.5-cp312-cp312-win_amd64.whl", hash = "sha256:5ca51eadbd67045396bc92a4345d1790b7301c14d1848feaac1d6a6c9289e888"}, + {file = "aiohttp-3.9.5-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:694d828b5c41255e54bc2dddb51a9f5150b4eefa9886e38b52605a05d96566e8"}, + {file = "aiohttp-3.9.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:0605cc2c0088fcaae79f01c913a38611ad09ba68ff482402d3410bf59039bfb8"}, + {file = "aiohttp-3.9.5-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:4558e5012ee03d2638c681e156461d37b7a113fe13970d438d95d10173d25f78"}, + {file = "aiohttp-3.9.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9dbc053ac75ccc63dc3a3cc547b98c7258ec35a215a92bd9f983e0aac95d3d5b"}, + {file = "aiohttp-3.9.5-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4109adee842b90671f1b689901b948f347325045c15f46b39797ae1bf17019de"}, + {file = "aiohttp-3.9.5-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a6ea1a5b409a85477fd8e5ee6ad8f0e40bf2844c270955e09360418cfd09abac"}, + {file = "aiohttp-3.9.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f3c2890ca8c59ee683fd09adf32321a40fe1cf164e3387799efb2acebf090c11"}, + {file = "aiohttp-3.9.5-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3916c8692dbd9d55c523374a3b8213e628424d19116ac4308e434dbf6d95bbdd"}, + {file = "aiohttp-3.9.5-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:8d1964eb7617907c792ca00b341b5ec3e01ae8c280825deadbbd678447b127e1"}, + {file = "aiohttp-3.9.5-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:d5ab8e1f6bee051a4bf6195e38a5c13e5e161cb7bad83d8854524798bd9fcd6e"}, + {file = "aiohttp-3.9.5-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:52c27110f3862a1afbcb2af4281fc9fdc40327fa286c4625dfee247c3ba90156"}, + {file = "aiohttp-3.9.5-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:7f64cbd44443e80094309875d4f9c71d0401e966d191c3d469cde4642bc2e031"}, + {file = "aiohttp-3.9.5-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:8b4f72fbb66279624bfe83fd5eb6aea0022dad8eec62b71e7bf63ee1caadeafe"}, + {file = "aiohttp-3.9.5-cp38-cp38-win32.whl", hash = "sha256:6380c039ec52866c06d69b5c7aad5478b24ed11696f0e72f6b807cfb261453da"}, + {file = "aiohttp-3.9.5-cp38-cp38-win_amd64.whl", hash = "sha256:da22dab31d7180f8c3ac7c7635f3bcd53808f374f6aa333fe0b0b9e14b01f91a"}, + {file = "aiohttp-3.9.5-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:1732102949ff6087589408d76cd6dea656b93c896b011ecafff418c9661dc4ed"}, + {file = "aiohttp-3.9.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:c6021d296318cb6f9414b48e6a439a7f5d1f665464da507e8ff640848ee2a58a"}, + {file = "aiohttp-3.9.5-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:239f975589a944eeb1bad26b8b140a59a3a320067fb3cd10b75c3092405a1372"}, + {file = "aiohttp-3.9.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3b7b30258348082826d274504fbc7c849959f1989d86c29bc355107accec6cfb"}, + {file = "aiohttp-3.9.5-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cd2adf5c87ff6d8b277814a28a535b59e20bfea40a101db6b3bdca7e9926bc24"}, + {file = "aiohttp-3.9.5-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e9a3d838441bebcf5cf442700e3963f58b5c33f015341f9ea86dcd7d503c07e2"}, + {file = "aiohttp-3.9.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9e3a1ae66e3d0c17cf65c08968a5ee3180c5a95920ec2731f53343fac9bad106"}, + {file = "aiohttp-3.9.5-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9c69e77370cce2d6df5d12b4e12bdcca60c47ba13d1cbbc8645dd005a20b738b"}, + {file = "aiohttp-3.9.5-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0cbf56238f4bbf49dab8c2dc2e6b1b68502b1e88d335bea59b3f5b9f4c001475"}, + {file = "aiohttp-3.9.5-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:d1469f228cd9ffddd396d9948b8c9cd8022b6d1bf1e40c6f25b0fb90b4f893ed"}, + {file = "aiohttp-3.9.5-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:45731330e754f5811c314901cebdf19dd776a44b31927fa4b4dbecab9e457b0c"}, + {file = "aiohttp-3.9.5-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:3fcb4046d2904378e3aeea1df51f697b0467f2aac55d232c87ba162709478c46"}, + {file = "aiohttp-3.9.5-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:8cf142aa6c1a751fcb364158fd710b8a9be874b81889c2bd13aa8893197455e2"}, + {file = "aiohttp-3.9.5-cp39-cp39-win32.whl", hash = "sha256:7b179eea70833c8dee51ec42f3b4097bd6370892fa93f510f76762105568cf09"}, + {file = "aiohttp-3.9.5-cp39-cp39-win_amd64.whl", hash = "sha256:38d80498e2e169bc61418ff36170e0aad0cd268da8b38a17c4cf29d254a8b3f1"}, + {file = "aiohttp-3.9.5.tar.gz", hash = "sha256:edea7d15772ceeb29db4aff55e482d4bcfb6ae160ce144f2682de02f6d693551"}, +] + +[package.dependencies] +aiosignal = ">=1.1.2" +attrs = ">=17.3.0" +frozenlist = ">=1.1.1" +multidict = ">=4.5,<7.0" +yarl = ">=1.0,<2.0" + +[package.extras] +speedups = ["Brotli", "aiodns", "brotlicffi"] + +[[package]] +name = "aiosignal" +version = "1.3.1" +description = "aiosignal: a list of registered asynchronous callbacks" +optional = false +python-versions = ">=3.7" +files = [ + {file = "aiosignal-1.3.1-py3-none-any.whl", hash = "sha256:f8376fb07dd1e86a584e4fcdec80b36b7f81aac666ebc724e2c090300dd83b17"}, + {file = "aiosignal-1.3.1.tar.gz", hash = "sha256:54cd96e15e1649b75d6c87526a6ff0b6c1b0dd3459f43d9ca11d48c339b68cfc"}, +] + +[package.dependencies] +frozenlist = ">=1.1.0" + [[package]] name = "alembic" version = "1.13.2" @@ -78,6 +187,17 @@ doc = ["Sphinx (>=7)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphin test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (>=0.17)"] trio = ["trio (>=0.23)"] +[[package]] +name = "appier" +version = "1.34.4" +description = "Appier Framework" +optional = false +python-versions = "*" +files = [ + {file = "appier-1.34.4-py2.py3-none-any.whl", hash = "sha256:96769bf56e015175f798958d6eeb189b6fbc2ca799f3b7d2c9bd463eb45a12ae"}, + {file = "appier-1.34.4.tar.gz", hash = "sha256:dd3b244ee2797c7ceda0b81f2331e39724262d4e838f1e53866543136d88ee96"}, +] + [[package]] name = "argcomplete" version = "3.4.0" @@ -92,6 +212,20 @@ files = [ [package.extras] test = ["coverage", "mypy", "pexpect", "ruff", "wheel"] +[[package]] +name = "asyncache" +version = "0.3.1" +description = "Helpers to use cachetools with async code." +optional = false +python-versions = ">=3.8,<4.0" +files = [ + {file = "asyncache-0.3.1-py3-none-any.whl", hash = "sha256:ef20a1024d265090dd1e0785c961cf98b9c32cc7d9478973dcf25ac1b80011f5"}, + {file = "asyncache-0.3.1.tar.gz", hash = "sha256:9a1e60a75668e794657489bdea6540ee7e3259c483517b934670db7600bf5035"}, +] + +[package.dependencies] +cachetools = ">=5.2.0,<6.0.0" + [[package]] name = "asyncpg" version = "0.29.0" @@ -146,6 +280,25 @@ files = [ docs = ["Sphinx (>=5.3.0,<5.4.0)", "sphinx-rtd-theme (>=1.2.2)", "sphinxcontrib-asyncio (>=0.3.0,<0.4.0)"] test = ["flake8 (>=6.1,<7.0)", "uvloop (>=0.15.3)"] +[[package]] +name = "attrs" +version = "23.2.0" +description = "Classes Without Boilerplate" +optional = false +python-versions = ">=3.7" +files = [ + {file = "attrs-23.2.0-py3-none-any.whl", hash = "sha256:99b87a485a5820b23b879f04c2305b44b951b502fd64be915879d77a7e8fc6f1"}, + {file = "attrs-23.2.0.tar.gz", hash = "sha256:935dc3b529c262f6cf76e50877d35a4bd3c1de194fd41f47a2b7ae8f19971f30"}, +] + +[package.extras] +cov = ["attrs[tests]", "coverage[toml] (>=5.3)"] +dev = ["attrs[tests]", "pre-commit"] +docs = ["furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier", "zope-interface"] +tests = ["attrs[tests-no-zope]", "zope-interface"] +tests-mypy = ["mypy (>=1.6)", "pytest-mypy-plugins"] +tests-no-zope = ["attrs[tests-mypy]", "cloudpickle", "hypothesis", "pympler", "pytest (>=4.3.0)", "pytest-xdist[psutil]"] + [[package]] name = "authlib" version = "1.3.2" @@ -162,20 +315,21 @@ cryptography = "*" [[package]] name = "authzed" -version = "0.18.3" +version = "0.15.0" description = "Client library for SpiceDB." optional = false python-versions = "<4.0,>=3.8" files = [ - {file = "authzed-0.18.3-py3-none-any.whl", hash = "sha256:367baa6f00dac68daad814f7db82464576530a11806ac4b17978d8c0e89511b7"}, - {file = "authzed-0.18.3.tar.gz", hash = "sha256:a06a930c78fdbe61f5caf4415739a9941d23068527fcf91011edc1ae0188a4f5"}, + {file = "authzed-0.15.0-py3-none-any.whl", hash = "sha256:b14069a5fce970b0b4fc05dc86d41cfd3d37bb47adc9dc83ac04adb31380e566"}, + {file = "authzed-0.15.0.tar.gz", hash = "sha256:213dbdd5ae27d98189c138e70be309dbd36d03a84b4e6a048bfeb2595db42764"}, ] [package.dependencies] -googleapis-common-protos = ">=1.65.0,<2.0.0" -grpc-interceptor = ">=0.15.4,<0.16.0" +google_api = ">=0.1.12,<0.2.0" +google-api-core = ">=2.4.0,<3.0.0" grpcio = ">=1.63,<2.0" protobuf = ">=5.26,<6" +typing-extensions = ">=3.7.4,<5" [[package]] name = "avro-preprocessor" @@ -288,13 +442,13 @@ files = [ [[package]] name = "certifi" -version = "2024.6.2" +version = "2024.7.4" description = "Python package for providing Mozilla's CA Bundle." optional = false python-versions = ">=3.6" files = [ - {file = "certifi-2024.6.2-py3-none-any.whl", hash = "sha256:ddc6c8ce995e6987e7faf5e3f1b02b302836a0e5d98ece18392cb1a36c72ad56"}, - {file = "certifi-2024.6.2.tar.gz", hash = "sha256:3cd43f1c6fa7dedc5899d69d3ad0398fd018ad1a17fba83ddaf78aa46c747516"}, + {file = "certifi-2024.7.4-py3-none-any.whl", hash = "sha256:c198e21b1289c2ab85ee4e67bb4b4ef3ead0892059901a8d5b622f24a1101e90"}, + {file = "certifi-2024.7.4.tar.gz", hash = "sha256:5a1e7645bc0ec61a09e26c36f6106dd4cf40c6db3a1fb6352b0244e7fb057c7b"}, ] [[package]] @@ -487,38 +641,43 @@ files = [ [[package]] name = "cryptography" -version = "43.0.1" +version = "42.0.8" description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers." optional = false python-versions = ">=3.7" files = [ - {file = "cryptography-43.0.1-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:8385d98f6a3bf8bb2d65a73e17ed87a3ba84f6991c155691c51112075f9ffc5d"}, - {file = "cryptography-43.0.1-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:27e613d7077ac613e399270253259d9d53872aaf657471473ebfc9a52935c062"}, - {file = "cryptography-43.0.1-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:68aaecc4178e90719e95298515979814bda0cbada1256a4485414860bd7ab962"}, - {file = "cryptography-43.0.1-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:de41fd81a41e53267cb020bb3a7212861da53a7d39f863585d13ea11049cf277"}, - {file = "cryptography-43.0.1-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:f98bf604c82c416bc829e490c700ca1553eafdf2912a91e23a79d97d9801372a"}, - {file = "cryptography-43.0.1-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:61ec41068b7b74268fa86e3e9e12b9f0c21fcf65434571dbb13d954bceb08042"}, - {file = "cryptography-43.0.1-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:014f58110f53237ace6a408b5beb6c427b64e084eb451ef25a28308270086494"}, - {file = "cryptography-43.0.1-cp37-abi3-win32.whl", hash = "sha256:2bd51274dcd59f09dd952afb696bf9c61a7a49dfc764c04dd33ef7a6b502a1e2"}, - {file = "cryptography-43.0.1-cp37-abi3-win_amd64.whl", hash = "sha256:666ae11966643886c2987b3b721899d250855718d6d9ce41b521252a17985f4d"}, - {file = "cryptography-43.0.1-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:ac119bb76b9faa00f48128b7f5679e1d8d437365c5d26f1c2c3f0da4ce1b553d"}, - {file = "cryptography-43.0.1-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1bbcce1a551e262dfbafb6e6252f1ae36a248e615ca44ba302df077a846a8806"}, - {file = "cryptography-43.0.1-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:58d4e9129985185a06d849aa6df265bdd5a74ca6e1b736a77959b498e0505b85"}, - {file = "cryptography-43.0.1-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:d03a475165f3134f773d1388aeb19c2d25ba88b6a9733c5c590b9ff7bbfa2e0c"}, - {file = "cryptography-43.0.1-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:511f4273808ab590912a93ddb4e3914dfd8a388fed883361b02dea3791f292e1"}, - {file = "cryptography-43.0.1-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:80eda8b3e173f0f247f711eef62be51b599b5d425c429b5d4ca6a05e9e856baa"}, - {file = "cryptography-43.0.1-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:38926c50cff6f533f8a2dae3d7f19541432610d114a70808f0926d5aaa7121e4"}, - {file = "cryptography-43.0.1-cp39-abi3-win32.whl", hash = "sha256:a575913fb06e05e6b4b814d7f7468c2c660e8bb16d8d5a1faf9b33ccc569dd47"}, - {file = "cryptography-43.0.1-cp39-abi3-win_amd64.whl", hash = "sha256:d75601ad10b059ec832e78823b348bfa1a59f6b8d545db3a24fd44362a1564cb"}, - {file = "cryptography-43.0.1-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:ea25acb556320250756e53f9e20a4177515f012c9eaea17eb7587a8c4d8ae034"}, - {file = "cryptography-43.0.1-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:c1332724be35d23a854994ff0b66530119500b6053d0bd3363265f7e5e77288d"}, - {file = "cryptography-43.0.1-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:fba1007b3ef89946dbbb515aeeb41e30203b004f0b4b00e5e16078b518563289"}, - {file = "cryptography-43.0.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:5b43d1ea6b378b54a1dc99dd8a2b5be47658fe9a7ce0a58ff0b55f4b43ef2b84"}, - {file = "cryptography-43.0.1-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:88cce104c36870d70c49c7c8fd22885875d950d9ee6ab54df2745f83ba0dc365"}, - {file = "cryptography-43.0.1-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:9d3cdb25fa98afdd3d0892d132b8d7139e2c087da1712041f6b762e4f807cc96"}, - {file = "cryptography-43.0.1-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:e710bf40870f4db63c3d7d929aa9e09e4e7ee219e703f949ec4073b4294f6172"}, - {file = "cryptography-43.0.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:7c05650fe8023c5ed0d46793d4b7d7e6cd9c04e68eabe5b0aeea836e37bdcec2"}, - {file = "cryptography-43.0.1.tar.gz", hash = "sha256:203e92a75716d8cfb491dc47c79e17d0d9207ccffcbcb35f598fbe463ae3444d"}, + {file = "cryptography-42.0.8-cp37-abi3-macosx_10_12_universal2.whl", hash = "sha256:81d8a521705787afe7a18d5bfb47ea9d9cc068206270aad0b96a725022e18d2e"}, + {file = "cryptography-42.0.8-cp37-abi3-macosx_10_12_x86_64.whl", hash = "sha256:961e61cefdcb06e0c6d7e3a1b22ebe8b996eb2bf50614e89384be54c48c6b63d"}, + {file = "cryptography-42.0.8-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e3ec3672626e1b9e55afd0df6d774ff0e953452886e06e0f1eb7eb0c832e8902"}, + {file = "cryptography-42.0.8-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e599b53fd95357d92304510fb7bda8523ed1f79ca98dce2f43c115950aa78801"}, + {file = "cryptography-42.0.8-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:5226d5d21ab681f432a9c1cf8b658c0cb02533eece706b155e5fbd8a0cdd3949"}, + {file = "cryptography-42.0.8-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:6b7c4f03ce01afd3b76cf69a5455caa9cfa3de8c8f493e0d3ab7d20611c8dae9"}, + {file = "cryptography-42.0.8-cp37-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:2346b911eb349ab547076f47f2e035fc8ff2c02380a7cbbf8d87114fa0f1c583"}, + {file = "cryptography-42.0.8-cp37-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:ad803773e9df0b92e0a817d22fd8a3675493f690b96130a5e24f1b8fabbea9c7"}, + {file = "cryptography-42.0.8-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:2f66d9cd9147ee495a8374a45ca445819f8929a3efcd2e3df6428e46c3cbb10b"}, + {file = "cryptography-42.0.8-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:d45b940883a03e19e944456a558b67a41160e367a719833c53de6911cabba2b7"}, + {file = "cryptography-42.0.8-cp37-abi3-win32.whl", hash = "sha256:a0c5b2b0585b6af82d7e385f55a8bc568abff8923af147ee3c07bd8b42cda8b2"}, + {file = "cryptography-42.0.8-cp37-abi3-win_amd64.whl", hash = "sha256:57080dee41209e556a9a4ce60d229244f7a66ef52750f813bfbe18959770cfba"}, + {file = "cryptography-42.0.8-cp39-abi3-macosx_10_12_universal2.whl", hash = "sha256:dea567d1b0e8bc5764b9443858b673b734100c2871dc93163f58c46a97a83d28"}, + {file = "cryptography-42.0.8-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c4783183f7cb757b73b2ae9aed6599b96338eb957233c58ca8f49a49cc32fd5e"}, + {file = "cryptography-42.0.8-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a0608251135d0e03111152e41f0cc2392d1e74e35703960d4190b2e0f4ca9c70"}, + {file = "cryptography-42.0.8-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:dc0fdf6787f37b1c6b08e6dfc892d9d068b5bdb671198c72072828b80bd5fe4c"}, + {file = "cryptography-42.0.8-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:9c0c1716c8447ee7dbf08d6db2e5c41c688544c61074b54fc4564196f55c25a7"}, + {file = "cryptography-42.0.8-cp39-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:fff12c88a672ab9c9c1cf7b0c80e3ad9e2ebd9d828d955c126be4fd3e5578c9e"}, + {file = "cryptography-42.0.8-cp39-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:cafb92b2bc622cd1aa6a1dce4b93307792633f4c5fe1f46c6b97cf67073ec961"}, + {file = "cryptography-42.0.8-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:31f721658a29331f895a5a54e7e82075554ccfb8b163a18719d342f5ffe5ecb1"}, + {file = "cryptography-42.0.8-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:b297f90c5723d04bcc8265fc2a0f86d4ea2e0f7ab4b6994459548d3a6b992a14"}, + {file = "cryptography-42.0.8-cp39-abi3-win32.whl", hash = "sha256:2f88d197e66c65be5e42cd72e5c18afbfae3f741742070e3019ac8f4ac57262c"}, + {file = "cryptography-42.0.8-cp39-abi3-win_amd64.whl", hash = "sha256:fa76fbb7596cc5839320000cdd5d0955313696d9511debab7ee7278fc8b5c84a"}, + {file = "cryptography-42.0.8-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:ba4f0a211697362e89ad822e667d8d340b4d8d55fae72cdd619389fb5912eefe"}, + {file = "cryptography-42.0.8-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:81884c4d096c272f00aeb1f11cf62ccd39763581645b0812e99a91505fa48e0c"}, + {file = "cryptography-42.0.8-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:c9bb2ae11bfbab395bdd072985abde58ea9860ed84e59dbc0463a5d0159f5b71"}, + {file = "cryptography-42.0.8-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:7016f837e15b0a1c119d27ecd89b3515f01f90a8615ed5e9427e30d9cdbfed3d"}, + {file = "cryptography-42.0.8-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:5a94eccb2a81a309806027e1670a358b99b8fe8bfe9f8d329f27d72c094dde8c"}, + {file = "cryptography-42.0.8-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:dec9b018df185f08483f294cae6ccac29e7a6e0678996587363dc352dc65c842"}, + {file = "cryptography-42.0.8-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:343728aac38decfdeecf55ecab3264b015be68fc2816ca800db649607aeee648"}, + {file = "cryptography-42.0.8-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:013629ae70b40af70c9a7a5db40abe5d9054e6f4380e50ce769947b73bf3caad"}, + {file = "cryptography-42.0.8.tar.gz", hash = "sha256:8d09d05439ce7baa8e9e95b07ec5b6c886f548deb7e0f69ef25f64b3bce842f2"}, ] [package.dependencies] @@ -531,7 +690,7 @@ nox = ["nox"] pep8test = ["check-sdist", "click", "mypy", "ruff"] sdist = ["build"] ssh = ["bcrypt (>=3.1.5)"] -test = ["certifi", "cryptography-vectors (==43.0.1)", "pretend", "pytest (>=6.2.0)", "pytest-benchmark", "pytest-cov", "pytest-xdist"] +test = ["certifi", "pretend", "pytest (>=6.2.0)", "pytest-benchmark", "pytest-cov", "pytest-xdist"] test-randomorder = ["pytest-randomly"] [[package]] @@ -658,6 +817,17 @@ files = [ dnspython = ">=2.0.0" idna = ">=2.0.0" +[[package]] +name = "escapism" +version = "1.0.1" +description = "Simple, generic API for escaping strings." +optional = false +python-versions = "*" +files = [ + {file = "escapism-1.0.1-py2.py3-none-any.whl", hash = "sha256:d28f19edc3cb1ffc36fa238956ecc068695477e748f57157c6dde00a6b77f229"}, + {file = "escapism-1.0.1.tar.gz", hash = "sha256:73256bdfb4f22230f0428fc6efecee61cdc4fad531b6f98b849cb9c80711e4ec"}, +] + [[package]] name = "factory-boy" version = "3.3.0" @@ -678,13 +848,13 @@ doc = ["Sphinx", "sphinx-rtd-theme", "sphinxcontrib-spelling"] [[package]] name = "faker" -version = "25.9.1" +version = "26.0.0" description = "Faker is a Python package that generates fake data for you." optional = false python-versions = ">=3.8" files = [ - {file = "Faker-25.9.1-py3-none-any.whl", hash = "sha256:f1dc27dc8035cb7e97e96afbb5fe1305eed6aeea53374702cbac96acfe851626"}, - {file = "Faker-25.9.1.tar.gz", hash = "sha256:0e1cf7a8d3c94de91a65ab1e9cf7050903efae1e97901f8e5924a9f45147ae44"}, + {file = "Faker-26.0.0-py3-none-any.whl", hash = "sha256:886ee28219be96949cd21ecc96c4c742ee1680e77f687b095202c8def1a08f06"}, + {file = "Faker-26.0.0.tar.gz", hash = "sha256:0f60978314973de02c00474c2ae899785a42b2cf4f41b7987e93c132a2b8a4a9"}, ] [package.dependencies] @@ -714,42 +884,42 @@ probabilistic = ["pyprobables (>=0.6,<0.7)"] [[package]] name = "fastavro" -version = "1.9.4" +version = "1.9.5" description = "Fast read/write of AVRO files" optional = false python-versions = ">=3.8" files = [ - {file = "fastavro-1.9.4-cp310-cp310-macosx_11_0_x86_64.whl", hash = "sha256:60cb38f07462a7fb4e4440ed0de67d3d400ae6b3d780f81327bebde9aa55faef"}, - {file = "fastavro-1.9.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:063d01d197fc929c20adc09ca9f0ca86d33ac25ee0963ce0b438244eee8315ae"}, - {file = "fastavro-1.9.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:87a9053fcfbc895f2a16a4303af22077e3a8fdcf1cd5d6ed47ff2ef22cbba2f0"}, - {file = "fastavro-1.9.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:02bf1276b7326397314adf41b34a4890f6ffa59cf7e0eb20b9e4ab0a143a1598"}, - {file = "fastavro-1.9.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:56bed9eca435389a8861e6e2d631ec7f8f5dda5b23f93517ac710665bd34ca29"}, - {file = "fastavro-1.9.4-cp310-cp310-win_amd64.whl", hash = "sha256:0cd2099c8c672b853e0b20c13e9b62a69d3fbf67ee7c59c7271ba5df1680310d"}, - {file = "fastavro-1.9.4-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:af8c6d8c43a02b5569c093fc5467469541ac408c79c36a5b0900d3dd0b3ba838"}, - {file = "fastavro-1.9.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e4a138710bd61580324d23bc5e3df01f0b82aee0a76404d5dddae73d9e4c723f"}, - {file = "fastavro-1.9.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:903d97418120ca6b6a7f38a731166c1ccc2c4344ee5e0470d09eb1dc3687540a"}, - {file = "fastavro-1.9.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:c443eeb99899d062dbf78c525e4614dd77e041a7688fa2710c224f4033f193ae"}, - {file = "fastavro-1.9.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:ac26ab0774d1b2b7af6d8f4300ad20bbc4b5469e658a02931ad13ce23635152f"}, - {file = "fastavro-1.9.4-cp311-cp311-win_amd64.whl", hash = "sha256:cf7247874c22be856ba7d1f46a0f6e0379a6025f1a48a7da640444cbac6f570b"}, - {file = "fastavro-1.9.4-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:68912f2020e1b3d70557260b27dd85fb49a4fc6bfab18d384926127452c1da4c"}, - {file = "fastavro-1.9.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6925ce137cdd78e109abdb0bc33aad55de6c9f2d2d3036b65453128f2f5f5b92"}, - {file = "fastavro-1.9.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8b928cd294e36e35516d0deb9e104b45be922ba06940794260a4e5dbed6c192a"}, - {file = "fastavro-1.9.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:90c9838bc4c991ffff5dd9d88a0cc0030f938b3fdf038cdf6babde144b920246"}, - {file = "fastavro-1.9.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:eca6e54da571b06a3c5a72dbb7212073f56c92a6fbfbf847b91c347510f8a426"}, - {file = "fastavro-1.9.4-cp312-cp312-win_amd64.whl", hash = "sha256:a4b02839ac261100cefca2e2ad04cdfedc556cb66b5ec735e0db428e74b399de"}, - {file = "fastavro-1.9.4-cp38-cp38-macosx_11_0_x86_64.whl", hash = "sha256:4451ee9a305a73313a1558d471299f3130e4ecc10a88bf5742aa03fb37e042e6"}, - {file = "fastavro-1.9.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a8524fccfb379565568c045d29b2ebf71e1f2c0dd484aeda9fe784ef5febe1a8"}, - {file = "fastavro-1.9.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:33d0a00a6e09baa20f6f038d7a2ddcb7eef0e7a9980e947a018300cb047091b8"}, - {file = "fastavro-1.9.4-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:23d7e5b29c9bf6f26e8be754b2c8b919838e506f78ef724de7d22881696712fc"}, - {file = "fastavro-1.9.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:2e6ab3ee53944326460edf1125b2ad5be2fadd80f7211b13c45fa0c503b4cf8d"}, - {file = "fastavro-1.9.4-cp38-cp38-win_amd64.whl", hash = "sha256:64d335ec2004204c501f8697c385d0a8f6b521ac82d5b30696f789ff5bc85f3c"}, - {file = "fastavro-1.9.4-cp39-cp39-macosx_11_0_x86_64.whl", hash = "sha256:7e05f44c493e89e73833bd3ff3790538726906d2856f59adc8103539f4a1b232"}, - {file = "fastavro-1.9.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:253c63993250bff4ee7b11fb46cf3a4622180a783bedc82a24c6fdcd1b10ca2a"}, - {file = "fastavro-1.9.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:24d6942eb1db14640c2581e0ecd1bbe0afc8a83731fcd3064ae7f429d7880cb7"}, - {file = "fastavro-1.9.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:d47bb66be6091cd48cfe026adcad11c8b11d7d815a2949a1e4ccf03df981ca65"}, - {file = "fastavro-1.9.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:c293897f12f910e58a1024f9c77f565aa8e23b36aafda6ad8e7041accc57a57f"}, - {file = "fastavro-1.9.4-cp39-cp39-win_amd64.whl", hash = "sha256:f05d2afcb10a92e2a9e580a3891f090589b3e567fdc5641f8a46a0b084f120c3"}, - {file = "fastavro-1.9.4.tar.gz", hash = "sha256:56b8363e360a1256c94562393dc7f8611f3baf2b3159f64fb2b9c6b87b14e876"}, + {file = "fastavro-1.9.5-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:61253148e95dd2b6457247b441b7555074a55de17aef85f5165bfd5facf600fc"}, + {file = "fastavro-1.9.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b604935d671ad47d888efc92a106f98e9440874108b444ac10e28d643109c937"}, + {file = "fastavro-1.9.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0adbf4956fd53bd74c41e7855bb45ccce953e0eb0e44f5836d8d54ad843f9944"}, + {file = "fastavro-1.9.5-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:53d838e31457db8bf44460c244543f75ed307935d5fc1d93bc631cc7caef2082"}, + {file = "fastavro-1.9.5-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:07b6288e8681eede16ff077632c47395d4925c2f51545cd7a60f194454db2211"}, + {file = "fastavro-1.9.5-cp310-cp310-win_amd64.whl", hash = "sha256:ef08cf247fdfd61286ac0c41854f7194f2ad05088066a756423d7299b688d975"}, + {file = "fastavro-1.9.5-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:c52d7bb69f617c90935a3e56feb2c34d4276819a5c477c466c6c08c224a10409"}, + {file = "fastavro-1.9.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:85e05969956003df8fa4491614bc62fe40cec59e94d06e8aaa8d8256ee3aab82"}, + {file = "fastavro-1.9.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:06e6df8527493a9f0d9a8778df82bab8b1aa6d80d1b004e5aec0a31dc4dc501c"}, + {file = "fastavro-1.9.5-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:27820da3b17bc01cebb6d1687c9d7254b16d149ef458871aaa207ed8950f3ae6"}, + {file = "fastavro-1.9.5-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:195a5b8e33eb89a1a9b63fa9dce7a77d41b3b0cd785bac6044df619f120361a2"}, + {file = "fastavro-1.9.5-cp311-cp311-win_amd64.whl", hash = "sha256:be612c109efb727bfd36d4d7ed28eb8e0506617b7dbe746463ebbf81e85eaa6b"}, + {file = "fastavro-1.9.5-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:b133456c8975ec7d2a99e16a7e68e896e45c821b852675eac4ee25364b999c14"}, + {file = "fastavro-1.9.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bf586373c3d1748cac849395aad70c198ee39295f92e7c22c75757b5c0300fbe"}, + {file = "fastavro-1.9.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:724ef192bc9c55d5b4c7df007f56a46a21809463499856349d4580a55e2b914c"}, + {file = "fastavro-1.9.5-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:bfd11fe355a8f9c0416803afac298960eb4c603a23b1c74ff9c1d3e673ea7185"}, + {file = "fastavro-1.9.5-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:9827d1654d7bcb118ef5efd3e5b2c9ab2a48d44dac5e8c6a2327bc3ac3caa828"}, + {file = "fastavro-1.9.5-cp312-cp312-win_amd64.whl", hash = "sha256:d84b69dca296667e6137ae7c9a96d060123adbc0c00532cc47012b64d38b47e9"}, + {file = "fastavro-1.9.5-cp38-cp38-macosx_11_0_universal2.whl", hash = "sha256:fb744e9de40fb1dc75354098c8db7da7636cba50a40f7bef3b3fb20f8d189d88"}, + {file = "fastavro-1.9.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:240df8bacd13ff5487f2465604c007d686a566df5cbc01d0550684eaf8ff014a"}, + {file = "fastavro-1.9.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c3bb35c25bbc3904e1c02333bc1ae0173e0a44aa37a8e95d07e681601246e1f1"}, + {file = "fastavro-1.9.5-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:b47a54a9700de3eabefd36dabfb237808acae47bc873cada6be6990ef6b165aa"}, + {file = "fastavro-1.9.5-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:48c7b5e6d2f3bf7917af301c275b05c5be3dd40bb04e80979c9e7a2ab31a00d1"}, + {file = "fastavro-1.9.5-cp38-cp38-win_amd64.whl", hash = "sha256:05d13f98d4e325be40387e27da9bd60239968862fe12769258225c62ec906f04"}, + {file = "fastavro-1.9.5-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:5b47948eb196263f6111bf34e1cd08d55529d4ed46eb50c1bc8c7c30a8d18868"}, + {file = "fastavro-1.9.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:85b7a66ad521298ad9373dfe1897a6ccfc38feab54a47b97922e213ae5ad8870"}, + {file = "fastavro-1.9.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:44cb154f863ad80e41aea72a709b12e1533b8728c89b9b1348af91a6154ab2f5"}, + {file = "fastavro-1.9.5-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:b5f7f2b1fe21231fd01f1a2a90e714ae267fe633cd7ce930c0aea33d1c9f4901"}, + {file = "fastavro-1.9.5-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:88fbbe16c61d90a89d78baeb5a34dc1c63a27b115adccdbd6b1fb6f787deacf2"}, + {file = "fastavro-1.9.5-cp39-cp39-win_amd64.whl", hash = "sha256:753f5eedeb5ca86004e23a9ce9b41c5f25eb64a876f95edcc33558090a7f3e4b"}, + {file = "fastavro-1.9.5.tar.gz", hash = "sha256:6419ebf45f88132a9945c51fe555d4f10bb97c236288ed01894f957c6f914553"}, ] [package.extras] @@ -758,6 +928,92 @@ lz4 = ["lz4"] snappy = ["cramjam"] zstandard = ["zstandard"] +[[package]] +name = "frozenlist" +version = "1.4.1" +description = "A list-like structure which implements collections.abc.MutableSequence" +optional = false +python-versions = ">=3.8" +files = [ + {file = "frozenlist-1.4.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:f9aa1878d1083b276b0196f2dfbe00c9b7e752475ed3b682025ff20c1c1f51ac"}, + {file = "frozenlist-1.4.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:29acab3f66f0f24674b7dc4736477bcd4bc3ad4b896f5f45379a67bce8b96868"}, + {file = "frozenlist-1.4.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:74fb4bee6880b529a0c6560885fce4dc95936920f9f20f53d99a213f7bf66776"}, + {file = "frozenlist-1.4.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:590344787a90ae57d62511dd7c736ed56b428f04cd8c161fcc5e7232c130c69a"}, + {file = "frozenlist-1.4.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:068b63f23b17df8569b7fdca5517edef76171cf3897eb68beb01341131fbd2ad"}, + {file = "frozenlist-1.4.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5c849d495bf5154cd8da18a9eb15db127d4dba2968d88831aff6f0331ea9bd4c"}, + {file = "frozenlist-1.4.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9750cc7fe1ae3b1611bb8cfc3f9ec11d532244235d75901fb6b8e42ce9229dfe"}, + {file = "frozenlist-1.4.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a9b2de4cf0cdd5bd2dee4c4f63a653c61d2408055ab77b151c1957f221cabf2a"}, + {file = "frozenlist-1.4.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:0633c8d5337cb5c77acbccc6357ac49a1770b8c487e5b3505c57b949b4b82e98"}, + {file = "frozenlist-1.4.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:27657df69e8801be6c3638054e202a135c7f299267f1a55ed3a598934f6c0d75"}, + {file = "frozenlist-1.4.1-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:f9a3ea26252bd92f570600098783d1371354d89d5f6b7dfd87359d669f2109b5"}, + {file = "frozenlist-1.4.1-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:4f57dab5fe3407b6c0c1cc907ac98e8a189f9e418f3b6e54d65a718aaafe3950"}, + {file = "frozenlist-1.4.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:e02a0e11cf6597299b9f3bbd3f93d79217cb90cfd1411aec33848b13f5c656cc"}, + {file = "frozenlist-1.4.1-cp310-cp310-win32.whl", hash = "sha256:a828c57f00f729620a442881cc60e57cfcec6842ba38e1b19fd3e47ac0ff8dc1"}, + {file = "frozenlist-1.4.1-cp310-cp310-win_amd64.whl", hash = "sha256:f56e2333dda1fe0f909e7cc59f021eba0d2307bc6f012a1ccf2beca6ba362439"}, + {file = "frozenlist-1.4.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:a0cb6f11204443f27a1628b0e460f37fb30f624be6051d490fa7d7e26d4af3d0"}, + {file = "frozenlist-1.4.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b46c8ae3a8f1f41a0d2ef350c0b6e65822d80772fe46b653ab6b6274f61d4a49"}, + {file = "frozenlist-1.4.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:fde5bd59ab5357e3853313127f4d3565fc7dad314a74d7b5d43c22c6a5ed2ced"}, + {file = "frozenlist-1.4.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:722e1124aec435320ae01ee3ac7bec11a5d47f25d0ed6328f2273d287bc3abb0"}, + {file = "frozenlist-1.4.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2471c201b70d58a0f0c1f91261542a03d9a5e088ed3dc6c160d614c01649c106"}, + {file = "frozenlist-1.4.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c757a9dd70d72b076d6f68efdbb9bc943665ae954dad2801b874c8c69e185068"}, + {file = "frozenlist-1.4.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f146e0911cb2f1da549fc58fc7bcd2b836a44b79ef871980d605ec392ff6b0d2"}, + {file = "frozenlist-1.4.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4f9c515e7914626b2a2e1e311794b4c35720a0be87af52b79ff8e1429fc25f19"}, + {file = "frozenlist-1.4.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:c302220494f5c1ebeb0912ea782bcd5e2f8308037b3c7553fad0e48ebad6ad82"}, + {file = "frozenlist-1.4.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:442acde1e068288a4ba7acfe05f5f343e19fac87bfc96d89eb886b0363e977ec"}, + {file = "frozenlist-1.4.1-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:1b280e6507ea8a4fa0c0a7150b4e526a8d113989e28eaaef946cc77ffd7efc0a"}, + {file = "frozenlist-1.4.1-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:fe1a06da377e3a1062ae5fe0926e12b84eceb8a50b350ddca72dc85015873f74"}, + {file = "frozenlist-1.4.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:db9e724bebd621d9beca794f2a4ff1d26eed5965b004a97f1f1685a173b869c2"}, + {file = "frozenlist-1.4.1-cp311-cp311-win32.whl", hash = "sha256:e774d53b1a477a67838a904131c4b0eef6b3d8a651f8b138b04f748fccfefe17"}, + {file = "frozenlist-1.4.1-cp311-cp311-win_amd64.whl", hash = "sha256:fb3c2db03683b5767dedb5769b8a40ebb47d6f7f45b1b3e3b4b51ec8ad9d9825"}, + {file = "frozenlist-1.4.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:1979bc0aeb89b33b588c51c54ab0161791149f2461ea7c7c946d95d5f93b56ae"}, + {file = "frozenlist-1.4.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:cc7b01b3754ea68a62bd77ce6020afaffb44a590c2289089289363472d13aedb"}, + {file = "frozenlist-1.4.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:c9c92be9fd329ac801cc420e08452b70e7aeab94ea4233a4804f0915c14eba9b"}, + {file = "frozenlist-1.4.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5c3894db91f5a489fc8fa6a9991820f368f0b3cbdb9cd8849547ccfab3392d86"}, + {file = "frozenlist-1.4.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ba60bb19387e13597fb059f32cd4d59445d7b18b69a745b8f8e5db0346f33480"}, + {file = "frozenlist-1.4.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8aefbba5f69d42246543407ed2461db31006b0f76c4e32dfd6f42215a2c41d09"}, + {file = "frozenlist-1.4.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:780d3a35680ced9ce682fbcf4cb9c2bad3136eeff760ab33707b71db84664e3a"}, + {file = "frozenlist-1.4.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9acbb16f06fe7f52f441bb6f413ebae6c37baa6ef9edd49cdd567216da8600cd"}, + {file = "frozenlist-1.4.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:23b701e65c7b36e4bf15546a89279bd4d8675faabc287d06bbcfac7d3c33e1e6"}, + {file = "frozenlist-1.4.1-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:3e0153a805a98f5ada7e09826255ba99fb4f7524bb81bf6b47fb702666484ae1"}, + {file = "frozenlist-1.4.1-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:dd9b1baec094d91bf36ec729445f7769d0d0cf6b64d04d86e45baf89e2b9059b"}, + {file = "frozenlist-1.4.1-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:1a4471094e146b6790f61b98616ab8e44f72661879cc63fa1049d13ef711e71e"}, + {file = "frozenlist-1.4.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:5667ed53d68d91920defdf4035d1cdaa3c3121dc0b113255124bcfada1cfa1b8"}, + {file = "frozenlist-1.4.1-cp312-cp312-win32.whl", hash = "sha256:beee944ae828747fd7cb216a70f120767fc9f4f00bacae8543c14a6831673f89"}, + {file = "frozenlist-1.4.1-cp312-cp312-win_amd64.whl", hash = "sha256:64536573d0a2cb6e625cf309984e2d873979709f2cf22839bf2d61790b448ad5"}, + {file = "frozenlist-1.4.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:20b51fa3f588ff2fe658663db52a41a4f7aa6c04f6201449c6c7c476bd255c0d"}, + {file = "frozenlist-1.4.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:410478a0c562d1a5bcc2f7ea448359fcb050ed48b3c6f6f4f18c313a9bdb1826"}, + {file = "frozenlist-1.4.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:c6321c9efe29975232da3bd0af0ad216800a47e93d763ce64f291917a381b8eb"}, + {file = "frozenlist-1.4.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:48f6a4533887e189dae092f1cf981f2e3885175f7a0f33c91fb5b7b682b6bab6"}, + {file = "frozenlist-1.4.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6eb73fa5426ea69ee0e012fb59cdc76a15b1283d6e32e4f8dc4482ec67d1194d"}, + {file = "frozenlist-1.4.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fbeb989b5cc29e8daf7f976b421c220f1b8c731cbf22b9130d8815418ea45887"}, + {file = "frozenlist-1.4.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:32453c1de775c889eb4e22f1197fe3bdfe457d16476ea407472b9442e6295f7a"}, + {file = "frozenlist-1.4.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:693945278a31f2086d9bf3df0fe8254bbeaef1fe71e1351c3bd730aa7d31c41b"}, + {file = "frozenlist-1.4.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:1d0ce09d36d53bbbe566fe296965b23b961764c0bcf3ce2fa45f463745c04701"}, + {file = "frozenlist-1.4.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:3a670dc61eb0d0eb7080890c13de3066790f9049b47b0de04007090807c776b0"}, + {file = "frozenlist-1.4.1-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:dca69045298ce5c11fd539682cff879cc1e664c245d1c64da929813e54241d11"}, + {file = "frozenlist-1.4.1-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:a06339f38e9ed3a64e4c4e43aec7f59084033647f908e4259d279a52d3757d09"}, + {file = "frozenlist-1.4.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:b7f2f9f912dca3934c1baec2e4585a674ef16fe00218d833856408c48d5beee7"}, + {file = "frozenlist-1.4.1-cp38-cp38-win32.whl", hash = "sha256:e7004be74cbb7d9f34553a5ce5fb08be14fb33bc86f332fb71cbe5216362a497"}, + {file = "frozenlist-1.4.1-cp38-cp38-win_amd64.whl", hash = "sha256:5a7d70357e7cee13f470c7883a063aae5fe209a493c57d86eb7f5a6f910fae09"}, + {file = "frozenlist-1.4.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:bfa4a17e17ce9abf47a74ae02f32d014c5e9404b6d9ac7f729e01562bbee601e"}, + {file = "frozenlist-1.4.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b7e3ed87d4138356775346e6845cccbe66cd9e207f3cd11d2f0b9fd13681359d"}, + {file = "frozenlist-1.4.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c99169d4ff810155ca50b4da3b075cbde79752443117d89429595c2e8e37fed8"}, + {file = "frozenlist-1.4.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:edb678da49d9f72c9f6c609fbe41a5dfb9a9282f9e6a2253d5a91e0fc382d7c0"}, + {file = "frozenlist-1.4.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6db4667b187a6742b33afbbaf05a7bc551ffcf1ced0000a571aedbb4aa42fc7b"}, + {file = "frozenlist-1.4.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:55fdc093b5a3cb41d420884cdaf37a1e74c3c37a31f46e66286d9145d2063bd0"}, + {file = "frozenlist-1.4.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:82e8211d69a4f4bc360ea22cd6555f8e61a1bd211d1d5d39d3d228b48c83a897"}, + {file = "frozenlist-1.4.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:89aa2c2eeb20957be2d950b85974b30a01a762f3308cd02bb15e1ad632e22dc7"}, + {file = "frozenlist-1.4.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:9d3e0c25a2350080e9319724dede4f31f43a6c9779be48021a7f4ebde8b2d742"}, + {file = "frozenlist-1.4.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:7268252af60904bf52c26173cbadc3a071cece75f873705419c8681f24d3edea"}, + {file = "frozenlist-1.4.1-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:0c250a29735d4f15321007fb02865f0e6b6a41a6b88f1f523ca1596ab5f50bd5"}, + {file = "frozenlist-1.4.1-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:96ec70beabbd3b10e8bfe52616a13561e58fe84c0101dd031dc78f250d5128b9"}, + {file = "frozenlist-1.4.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:23b2d7679b73fe0e5a4560b672a39f98dfc6f60df63823b0a9970525325b95f6"}, + {file = "frozenlist-1.4.1-cp39-cp39-win32.whl", hash = "sha256:a7496bfe1da7fb1a4e1cc23bb67c58fab69311cc7d32b5a99c2007b4b2a0e932"}, + {file = "frozenlist-1.4.1-cp39-cp39-win_amd64.whl", hash = "sha256:e6a20a581f9ce92d389a8c7d7c3dd47c81fd5d6e655c8dddf341e14aa48659d0"}, + {file = "frozenlist-1.4.1-py3-none-any.whl", hash = "sha256:04ced3e6a46b4cfffe20f9ae482818e34eba9b5fb0ce4056e4cc9b6e212d09b7"}, + {file = "frozenlist-1.4.1.tar.gz", hash = "sha256:c037a86e8513059a2613aaba4d817bb90b9d9b6b69aace3ce9c877e8c8ed402b"}, +] + [[package]] name = "genson" version = "1.3.0" @@ -769,15 +1025,52 @@ files = [ {file = "genson-1.3.0.tar.gz", hash = "sha256:e02db9ac2e3fd29e65b5286f7135762e2cd8a986537c075b06fc5f1517308e37"}, ] +[[package]] +name = "google-api" +version = "0.1.12" +description = "Google API Client" +optional = false +python-versions = "*" +files = [ + {file = "google_api-0.1.12-py2.py3-none-any.whl", hash = "sha256:618f9f2076482a128c408867b5398b291938fe8e653ed7f8ed58fce5042f0c75"}, + {file = "google_api-0.1.12.tar.gz", hash = "sha256:5611c87cdfc6b72927a5e2ea9299ddd6f3a206e29a342b86d3ff3ecc351c30a3"}, +] + +[package.dependencies] +appier = "*" + +[[package]] +name = "google-api-core" +version = "2.19.1" +description = "Google API client core library" +optional = false +python-versions = ">=3.7" +files = [ + {file = "google-api-core-2.19.1.tar.gz", hash = "sha256:f4695f1e3650b316a795108a76a1c416e6afb036199d1c1f1f110916df479ffd"}, + {file = "google_api_core-2.19.1-py3-none-any.whl", hash = "sha256:f12a9b8309b5e21d92483bbd47ce2c445861ec7d269ef6784ecc0ea8c1fa6125"}, +] + +[package.dependencies] +google-auth = ">=2.14.1,<3.0.dev0" +googleapis-common-protos = ">=1.56.2,<2.0.dev0" +proto-plus = ">=1.22.3,<2.0.0dev" +protobuf = ">=3.19.5,<3.20.0 || >3.20.0,<3.20.1 || >3.20.1,<4.21.0 || >4.21.0,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<6.0.0.dev0" +requests = ">=2.18.0,<3.0.0.dev0" + +[package.extras] +grpc = ["grpcio (>=1.33.2,<2.0dev)", "grpcio (>=1.49.1,<2.0dev)", "grpcio-status (>=1.33.2,<2.0.dev0)", "grpcio-status (>=1.49.1,<2.0.dev0)"] +grpcgcp = ["grpcio-gcp (>=0.2.2,<1.0.dev0)"] +grpcio-gcp = ["grpcio-gcp (>=0.2.2,<1.0.dev0)"] + [[package]] name = "google-auth" -version = "2.30.0" +version = "2.32.0" description = "Google Authentication Library" optional = false python-versions = ">=3.7" files = [ - {file = "google-auth-2.30.0.tar.gz", hash = "sha256:ab630a1320f6720909ad76a7dbdb6841cdf5c66b328d690027e4867bdfb16688"}, - {file = "google_auth-2.30.0-py2.py3-none-any.whl", hash = "sha256:8df7da660f62757388b8a7f249df13549b3373f24388cb5d2f1dd91cc18180b5"}, + {file = "google_auth-2.32.0-py2.py3-none-any.whl", hash = "sha256:53326ea2ebec768070a94bee4e1b9194c9646ea0c2bd72422785bd0f9abfad7b"}, + {file = "google_auth-2.32.0.tar.gz", hash = "sha256:49315be72c55a6a37d62819e3573f6b416aca00721f7e3e31a008d928bf64022"}, ] [package.dependencies] @@ -794,13 +1087,13 @@ requests = ["requests (>=2.20.0,<3.0.0.dev0)"] [[package]] name = "googleapis-common-protos" -version = "1.65.0" +version = "1.63.2" description = "Common protobufs used in Google APIs" optional = false python-versions = ">=3.7" files = [ - {file = "googleapis_common_protos-1.65.0-py2.py3-none-any.whl", hash = "sha256:2972e6c496f435b92590fd54045060867f3fe9be2c82ab148fc8885035479a63"}, - {file = "googleapis_common_protos-1.65.0.tar.gz", hash = "sha256:334a29d07cddc3aa01dee4988f9afd9b2916ee2ff49d6b757155dc0d197852c0"}, + {file = "googleapis-common-protos-1.63.2.tar.gz", hash = "sha256:27c5abdffc4911f28101e635de1533fb4cfd2c37fbaa9174587c799fac90aa87"}, + {file = "googleapis_common_protos-1.63.2-py2.py3-none-any.whl", hash = "sha256:27a2499c7e8aff199665b22741997e485eccc8645aa9176c7c988e6fae507945"}, ] [package.dependencies] @@ -880,23 +1173,6 @@ files = [ docs = ["Sphinx", "furo"] test = ["objgraph", "psutil"] -[[package]] -name = "grpc-interceptor" -version = "0.15.4" -description = "Simplifies gRPC interceptors" -optional = false -python-versions = ">=3.7,<4.0" -files = [ - {file = "grpc-interceptor-0.15.4.tar.gz", hash = "sha256:1f45c0bcb58b6f332f37c637632247c9b02bc6af0fdceb7ba7ce8d2ebbfb0926"}, - {file = "grpc_interceptor-0.15.4-py3-none-any.whl", hash = "sha256:0035f33228693ed3767ee49d937bac424318db173fef4d2d0170b3215f254d9d"}, -] - -[package.dependencies] -grpcio = ">=1.49.1,<2.0.0" - -[package.extras] -testing = ["protobuf (>=4.21.9)"] - [[package]] name = "grpcio" version = "1.64.1" @@ -1071,6 +1347,23 @@ http2 = ["h2 (>=3,<5)"] socks = ["socksio (==1.*)"] zstd = ["zstandard (>=0.18.0)"] +[[package]] +name = "httpx-ws" +version = "0.6.0" +description = "WebSockets support for HTTPX" +optional = false +python-versions = ">=3.8" +files = [ + {file = "httpx_ws-0.6.0-py3-none-any.whl", hash = "sha256:437cfca94519a4e6ae06eb5573192df6c0da85c22b1a19cc1ea0b02b05a51d25"}, + {file = "httpx_ws-0.6.0.tar.gz", hash = "sha256:60218f531fb474a2143af38568f4b7d94ba356780973443365c8e2c87882bb8c"}, +] + +[package.dependencies] +anyio = ">=4" +httpcore = ">=1.0.4" +httpx = ">=0.23.1" +wsproto = "*" + [[package]] name = "idna" version = "3.7" @@ -1150,6 +1443,31 @@ files = [ {file = "json5-0.9.25.tar.gz", hash = "sha256:548e41b9be043f9426776f05df8635a00fe06104ea51ed24b67f908856e151ae"}, ] +[[package]] +name = "kr8s" +version = "0.17.0" +description = "A Kubernetes API library" +optional = false +python-versions = ">=3.8" +files = [ + {file = "kr8s-0.17.0-py3-none-any.whl", hash = "sha256:7307bca7b125cdc8c41ec9d7a0b3b1273c4c76b10b992a054aaf1e38309f1445"}, + {file = "kr8s-0.17.0.tar.gz", hash = "sha256:c2afe40461f1b1c853dcde755a64fe4837e05b931c6effbfff12ab32ae224445"}, +] + +[package.dependencies] +anyio = ">=3.7.0" +asyncache = ">=0.3.1" +cryptography = ">=35" +httpx = ">=0.24.1" +httpx-ws = ">=0.5.2" +python-box = ">=7.0.1" +python-jsonpath = ">=0.7.1" +pyyaml = ">=6.0" + +[package.extras] +docs = ["furo (>=2023.3.27)", "myst-parser (>=1.0.0)", "sphinx (>=5.3.0)", "sphinx-autoapi (>=2.1.0)", "sphinx-autobuild (>=2021.3.14)", "sphinx-copybutton (>=0.5.1)", "sphinx-design (>=0.3.0)", "sphinxcontrib-mermaid (>=0.8.1)"] +test = ["kubernetes (>=26.1.0)", "kubernetes-asyncio (>=24.2.3)", "kubernetes-validate (>=1.28.0)", "lightkube (>=0.13.0)", "pykube-ng (>=23.6.0)", "pytest (>=7.2.2)", "pytest-asyncio (>=0.20.3)", "pytest-cov (>=4.0.0)", "pytest-kind (>=22.11.1)", "pytest-rerunfailures (>=11.1.2)", "pytest-timeout (>=2.1.0)", "trio (>=0.22.0)", "types-pyyaml (>=6.0)"] + [[package]] name = "kubernetes" version = "31.0.0" @@ -1177,6 +1495,25 @@ websocket-client = ">=0.32.0,<0.40.0 || >0.40.0,<0.41.dev0 || >=0.43.dev0" [package.extras] adal = ["adal (>=1.0.2)"] +[[package]] +name = "kubernetes-asyncio" +version = "30.1.1" +description = "Kubernetes asynchronous python client" +optional = false +python-versions = "*" +files = [ + {file = "kubernetes_asyncio-30.1.1-py3-none-any.whl", hash = "sha256:3bb40d906ba37f5553bbf0ee9b69947bf14b93c481ed69e2a5ab02aa6ded33d7"}, + {file = "kubernetes_asyncio-30.1.1.tar.gz", hash = "sha256:7523f8650bedb0c9cf5264f2b043ee94fab9b0d29a142c63d59d435bd9df66d7"}, +] + +[package.dependencies] +aiohttp = ">=3.9.0,<4.0.0" +certifi = ">=14.05.14" +python-dateutil = ">=2.5.3" +pyyaml = ">=3.12" +six = ">=1.9.0" +urllib3 = ">=1.24.2" + [[package]] name = "mako" version = "1.3.5" @@ -1265,6 +1602,25 @@ files = [ {file = "MarkupSafe-2.1.5.tar.gz", hash = "sha256:d283d37a890ba4c1ae73ffadf8046435c76e7bc2247bbb63c00bd1a709c6544b"}, ] +[[package]] +name = "marshmallow" +version = "3.21.3" +description = "A lightweight library for converting complex datatypes to and from native Python datatypes." +optional = false +python-versions = ">=3.8" +files = [ + {file = "marshmallow-3.21.3-py3-none-any.whl", hash = "sha256:86ce7fb914aa865001a4b2092c4c2872d13bc347f3d42673272cabfdbad386f1"}, + {file = "marshmallow-3.21.3.tar.gz", hash = "sha256:4f57c5e050a54d66361e826f94fba213eb10b67b2fdb02c3e0343ce207ba1662"}, +] + +[package.dependencies] +packaging = ">=17.0" + +[package.extras] +dev = ["marshmallow[tests]", "pre-commit (>=3.5,<4.0)", "tox"] +docs = ["alabaster (==0.7.16)", "autodocsumm (==0.2.12)", "sphinx (==7.3.7)", "sphinx-issues (==4.1.0)", "sphinx-version-warning (==1.1.2)"] +tests = ["pytest", "pytz", "simplejson"] + [[package]] name = "multidict" version = "6.0.5" @@ -1475,24 +1831,41 @@ files = [ prometheus-client = ">=0.7.1,<0.8.0" sanic = ">=22.0.0" +[[package]] +name = "proto-plus" +version = "1.24.0" +description = "Beautiful, Pythonic protocol buffers." +optional = false +python-versions = ">=3.7" +files = [ + {file = "proto-plus-1.24.0.tar.gz", hash = "sha256:30b72a5ecafe4406b0d339db35b56c4059064e69227b8c3bda7462397f966445"}, + {file = "proto_plus-1.24.0-py3-none-any.whl", hash = "sha256:402576830425e5f6ce4c2a6702400ac79897dab0b4343821aa5188b0fab81a12"}, +] + +[package.dependencies] +protobuf = ">=3.19.0,<6.0.0dev" + +[package.extras] +testing = ["google-api-core (>=1.31.5)"] + [[package]] name = "protobuf" -version = "5.27.1" +version = "5.27.2" description = "" optional = false python-versions = ">=3.8" files = [ - {file = "protobuf-5.27.1-cp310-abi3-win32.whl", hash = "sha256:3adc15ec0ff35c5b2d0992f9345b04a540c1e73bfee3ff1643db43cc1d734333"}, - {file = "protobuf-5.27.1-cp310-abi3-win_amd64.whl", hash = "sha256:25236b69ab4ce1bec413fd4b68a15ef8141794427e0b4dc173e9d5d9dffc3bcd"}, - {file = "protobuf-5.27.1-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:4e38fc29d7df32e01a41cf118b5a968b1efd46b9c41ff515234e794011c78b17"}, - {file = "protobuf-5.27.1-cp38-abi3-manylinux2014_aarch64.whl", hash = "sha256:917ed03c3eb8a2d51c3496359f5b53b4e4b7e40edfbdd3d3f34336e0eef6825a"}, - {file = "protobuf-5.27.1-cp38-abi3-manylinux2014_x86_64.whl", hash = "sha256:ee52874a9e69a30271649be88ecbe69d374232e8fd0b4e4b0aaaa87f429f1631"}, - {file = "protobuf-5.27.1-cp38-cp38-win32.whl", hash = "sha256:7a97b9c5aed86b9ca289eb5148df6c208ab5bb6906930590961e08f097258107"}, - {file = "protobuf-5.27.1-cp38-cp38-win_amd64.whl", hash = "sha256:f6abd0f69968792da7460d3c2cfa7d94fd74e1c21df321eb6345b963f9ec3d8d"}, - {file = "protobuf-5.27.1-cp39-cp39-win32.whl", hash = "sha256:dfddb7537f789002cc4eb00752c92e67885badcc7005566f2c5de9d969d3282d"}, - {file = "protobuf-5.27.1-cp39-cp39-win_amd64.whl", hash = "sha256:39309898b912ca6febb0084ea912e976482834f401be35840a008da12d189340"}, - {file = "protobuf-5.27.1-py3-none-any.whl", hash = "sha256:4ac7249a1530a2ed50e24201d6630125ced04b30619262f06224616e0030b6cf"}, - {file = "protobuf-5.27.1.tar.gz", hash = "sha256:df5e5b8e39b7d1c25b186ffdf9f44f40f810bbcc9d2b71d9d3156fee5a9adf15"}, + {file = "protobuf-5.27.2-cp310-abi3-win32.whl", hash = "sha256:354d84fac2b0d76062e9b3221f4abbbacdfd2a4d8af36bab0474f3a0bb30ab38"}, + {file = "protobuf-5.27.2-cp310-abi3-win_amd64.whl", hash = "sha256:0e341109c609749d501986b835f667c6e1e24531096cff9d34ae411595e26505"}, + {file = "protobuf-5.27.2-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:a109916aaac42bff84702fb5187f3edadbc7c97fc2c99c5ff81dd15dcce0d1e5"}, + {file = "protobuf-5.27.2-cp38-abi3-manylinux2014_aarch64.whl", hash = "sha256:176c12b1f1c880bf7a76d9f7c75822b6a2bc3db2d28baa4d300e8ce4cde7409b"}, + {file = "protobuf-5.27.2-cp38-abi3-manylinux2014_x86_64.whl", hash = "sha256:b848dbe1d57ed7c191dfc4ea64b8b004a3f9ece4bf4d0d80a367b76df20bf36e"}, + {file = "protobuf-5.27.2-cp38-cp38-win32.whl", hash = "sha256:4fadd8d83e1992eed0248bc50a4a6361dc31bcccc84388c54c86e530b7f58863"}, + {file = "protobuf-5.27.2-cp38-cp38-win_amd64.whl", hash = "sha256:610e700f02469c4a997e58e328cac6f305f649826853813177e6290416e846c6"}, + {file = "protobuf-5.27.2-cp39-cp39-win32.whl", hash = "sha256:9e8f199bf7f97bd7ecebffcae45ebf9527603549b2b562df0fbc6d4d688f14ca"}, + {file = "protobuf-5.27.2-cp39-cp39-win_amd64.whl", hash = "sha256:7fc3add9e6003e026da5fc9e59b131b8f22b428b991ccd53e2af8071687b4fce"}, + {file = "protobuf-5.27.2-py3-none-any.whl", hash = "sha256:54330f07e4949d09614707c48b06d1a22f8ffb5763c159efd5c0928326a91470"}, + {file = "protobuf-5.27.2.tar.gz", hash = "sha256:f3ecdef226b9af856075f28227ff2c90ce3a594d092c39bee5513573f25e2714"}, ] [[package]] @@ -1809,6 +2182,41 @@ dev = ["coverage[toml] (==5.0.4)", "cryptography (>=3.4.0)", "pre-commit", "pyte docs = ["sphinx", "sphinx-rtd-theme", "zope.interface"] tests = ["coverage[toml] (==5.0.4)", "pytest (>=6.0.0,<7.0.0)"] +[[package]] +name = "python-box" +version = "7.2.0" +description = "Advanced Python dictionaries with dot notation access" +optional = false +python-versions = ">=3.8" +files = [ + {file = "python_box-7.2.0-cp310-cp310-macosx_11_0_x86_64.whl", hash = "sha256:6bdeec791e25258351388b3029a3ec5da302bb9ed3be175493c43cdc6c47f5e3"}, + {file = "python_box-7.2.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c449f7b3756a71479fa9c61a86e344ac00ed782a66d7662590f0afa294249d18"}, + {file = "python_box-7.2.0-cp310-cp310-win_amd64.whl", hash = "sha256:6b0d61f182d394106d963232854e495b51edc178faa5316a797be1178212d7e0"}, + {file = "python_box-7.2.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:e2d752de8c1204255bf7b0c814c59ef48293c187a7e9fdcd2fefa28024b72032"}, + {file = "python_box-7.2.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e8a6c35ea356a386077935958a5debcd5b229b9a1b3b26287a52dfe1a7e65d99"}, + {file = "python_box-7.2.0-cp311-cp311-win_amd64.whl", hash = "sha256:32ed58ec4d9e5475efe69f9c7d773dfea90a6a01979e776da93fd2b0a5d04429"}, + {file = "python_box-7.2.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:2a2d664c6a27f7515469b6f1e461935a2038ee130b7d194b4b4db4e85d363618"}, + {file = "python_box-7.2.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e8a5a7365db1aaf600d3e8a2747fcf6833beb5d45439a54318548f02e302e3ec"}, + {file = "python_box-7.2.0-cp312-cp312-win_amd64.whl", hash = "sha256:739f827056ea148cbea3122d4617c994e829b420b1331183d968b175304e3a4f"}, + {file = "python_box-7.2.0-cp38-cp38-macosx_11_0_x86_64.whl", hash = "sha256:2617ef3c3d199f55f63c908f540a4dc14ced9b18533a879e6171c94a6a436f23"}, + {file = "python_box-7.2.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ffd866bed03087b1d8340014da8c3aaae19135767580641df1b4ae6fff6ac0aa"}, + {file = "python_box-7.2.0-cp38-cp38-win_amd64.whl", hash = "sha256:9681f059e7e92bdf20782cd9ea6e533d4711fc7b8c57a462922a025d46add4d0"}, + {file = "python_box-7.2.0-cp39-cp39-macosx_11_0_x86_64.whl", hash = "sha256:6b59b1e2741c9ceecdf5a5bd9b90502c24650e609cd824d434fed3b6f302b7bb"}, + {file = "python_box-7.2.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e23fae825d809ae7520fdeac88bb52be55a3b63992120a00e381783669edf589"}, + {file = "python_box-7.2.0-cp39-cp39-win_amd64.whl", hash = "sha256:573b1abdcb7bd745fa404444f060ee62fc35a74f067181e55dcb43cfe92f2827"}, + {file = "python_box-7.2.0-py3-none-any.whl", hash = "sha256:a3c90832dd772cb0197fdb5bc06123b6e1b846899a1b53d9c39450d27a584829"}, + {file = "python_box-7.2.0.tar.gz", hash = "sha256:551af20bdab3a60a2a21e3435120453c4ca32f7393787c3a5036e1d9fc6a0ede"}, +] + +[package.extras] +all = ["msgpack", "ruamel.yaml (>=0.17)", "toml"] +msgpack = ["msgpack"] +pyyaml = ["PyYAML"] +ruamel-yaml = ["ruamel.yaml (>=0.17)"] +toml = ["toml"] +tomli = ["tomli", "tomli-w"] +yaml = ["ruamel.yaml (>=0.17)"] + [[package]] name = "python-dateutil" version = "2.9.0.post0" @@ -1843,6 +2251,17 @@ autocompletion = ["argcomplete (>=1.10.0,<3)"] graphql = ["gql[httpx] (>=3.5.0,<4)"] yaml = ["PyYaml (>=6.0.1)"] +[[package]] +name = "python-jsonpath" +version = "1.1.1" +description = "JSONPath, JSON Pointer and JSON Patch for Python." +optional = false +python-versions = ">=3.7" +files = [ + {file = "python_jsonpath-1.1.1-py3-none-any.whl", hash = "sha256:43f2622b7aaaf4f45dd873e80cfd181058503e08ffdeac5218135f3a97bd0aec"}, + {file = "python_jsonpath-1.1.1.tar.gz", hash = "sha256:d2944e1f7a1d6c8fa958724f9570b8f04a4e00ab6bf1e4733346ab8dcef1f74f"}, +] + [[package]] name = "python-ulid" version = "2.7.0" @@ -1880,7 +2299,6 @@ files = [ {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938"}, {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d"}, {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515"}, - {file = "PyYAML-6.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:326c013efe8048858a6d312ddd31d56e468118ad4cdeda36c719bf5bb6192290"}, {file = "PyYAML-6.0.1-cp310-cp310-win32.whl", hash = "sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924"}, {file = "PyYAML-6.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d"}, {file = "PyYAML-6.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007"}, @@ -1888,16 +2306,8 @@ files = [ {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d"}, {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc"}, {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673"}, - {file = "PyYAML-6.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e7d73685e87afe9f3b36c799222440d6cf362062f78be1013661b00c5c6f678b"}, {file = "PyYAML-6.0.1-cp311-cp311-win32.whl", hash = "sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741"}, {file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"}, - {file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"}, - {file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"}, - {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a08c6f0fe150303c1c6b71ebcd7213c2858041a7e01975da3a99aed1e7a378ef"}, - {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"}, - {file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"}, - {file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"}, - {file = "PyYAML-6.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:0d3304d8c0adc42be59c5f8a4d9e3d7379e6955ad754aa9d6ab7a398b59dd1df"}, {file = "PyYAML-6.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47"}, {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98"}, {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c"}, @@ -1914,7 +2324,6 @@ files = [ {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5"}, {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696"}, {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735"}, - {file = "PyYAML-6.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:49a183be227561de579b4a36efbb21b3eab9651dd81b1858589f796549873dd6"}, {file = "PyYAML-6.0.1-cp38-cp38-win32.whl", hash = "sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206"}, {file = "PyYAML-6.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62"}, {file = "PyYAML-6.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8"}, @@ -1922,7 +2331,6 @@ files = [ {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6"}, {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0"}, {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c"}, - {file = "PyYAML-6.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:04ac92ad1925b2cff1db0cfebffb6ffc43457495c9b3c39d3fcae417d7125dc5"}, {file = "PyYAML-6.0.1-cp39-cp39-win32.whl", hash = "sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c"}, {file = "PyYAML-6.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486"}, {file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"}, @@ -2037,24 +2445,24 @@ python-versions = ">=3.6" files = [ {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:b42169467c42b692c19cf539c38d4602069d8c1505e97b86387fcf7afb766e1d"}, {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-macosx_13_0_arm64.whl", hash = "sha256:07238db9cbdf8fc1e9de2489a4f68474e70dffcb32232db7c08fa61ca0c7c462"}, + {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:d92f81886165cb14d7b067ef37e142256f1c6a90a65cd156b063a43da1708cfd"}, {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:fff3573c2db359f091e1589c3d7c5fc2f86f5bdb6f24252c2d8e539d4e45f412"}, - {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-manylinux_2_24_aarch64.whl", hash = "sha256:aa2267c6a303eb483de8d02db2871afb5c5fc15618d894300b88958f729ad74f"}, {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:840f0c7f194986a63d2c2465ca63af8ccbbc90ab1c6001b1978f05119b5e7334"}, {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:024cfe1fc7c7f4e1aff4a81e718109e13409767e4f871443cbff3dba3578203d"}, {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-win32.whl", hash = "sha256:c69212f63169ec1cfc9bb44723bf2917cbbd8f6191a00ef3410f5a7fe300722d"}, {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-win_amd64.whl", hash = "sha256:cabddb8d8ead485e255fe80429f833172b4cadf99274db39abc080e068cbcc31"}, {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:bef08cd86169d9eafb3ccb0a39edb11d8e25f3dae2b28f5c52fd997521133069"}, {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-macosx_13_0_arm64.whl", hash = "sha256:b16420e621d26fdfa949a8b4b47ade8810c56002f5389970db4ddda51dbff248"}, + {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-manylinux2014_aarch64.whl", hash = "sha256:b5edda50e5e9e15e54a6a8a0070302b00c518a9d32accc2346ad6c984aacd279"}, {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:25c515e350e5b739842fc3228d662413ef28f295791af5e5110b543cf0b57d9b"}, - {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-manylinux_2_24_aarch64.whl", hash = "sha256:1707814f0d9791df063f8c19bb51b0d1278b8e9a2353abbb676c2f685dee6afe"}, {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:46d378daaac94f454b3a0e3d8d78cafd78a026b1d71443f4966c696b48a6d899"}, {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:09b055c05697b38ecacb7ac50bdab2240bfca1a0c4872b0fd309bb07dc9aa3a9"}, {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-win32.whl", hash = "sha256:53a300ed9cea38cf5a2a9b069058137c2ca1ce658a874b79baceb8f892f915a7"}, {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-win_amd64.whl", hash = "sha256:c2a72e9109ea74e511e29032f3b670835f8a59bbdc9ce692c5b4ed91ccf1eedb"}, {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:ebc06178e8821efc9692ea7544aa5644217358490145629914d8020042c24aa1"}, {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-macosx_13_0_arm64.whl", hash = "sha256:edaef1c1200c4b4cb914583150dcaa3bc30e592e907c01117c08b13a07255ec2"}, + {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-manylinux2014_aarch64.whl", hash = "sha256:7048c338b6c86627afb27faecf418768acb6331fc24cfa56c93e8c9780f815fa"}, {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d176b57452ab5b7028ac47e7b3cf644bcfdc8cacfecf7e71759f7f51a59e5c92"}, - {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-manylinux_2_24_aarch64.whl", hash = "sha256:1dc67314e7e1086c9fdf2680b7b6c2be1c0d8e3a8279f2e993ca2a7545fecf62"}, {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:3213ece08ea033eb159ac52ae052a4899b56ecc124bb80020d9bbceeb50258e9"}, {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:aab7fd643f71d7946f2ee58cc88c9b7bfc97debd71dcc93e03e2d174628e7e2d"}, {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-win32.whl", hash = "sha256:5c365d91c88390c8d0a8545df0b5857172824b1c604e867161e6b3d59a827eaa"}, @@ -2062,7 +2470,7 @@ files = [ {file = "ruamel.yaml.clib-0.2.8-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:a5aa27bad2bb83670b71683aae140a1f52b0857a2deff56ad3f6c13a017a26ed"}, {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:c58ecd827313af6864893e7af0a3bb85fd529f862b6adbefe14643947cfe2942"}, {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-macosx_12_0_arm64.whl", hash = "sha256:f481f16baec5290e45aebdc2a5168ebc6d35189ae6fea7a58787613a25f6e875"}, - {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-manylinux_2_24_aarch64.whl", hash = "sha256:77159f5d5b5c14f7c34073862a6b7d34944075d9f93e681638f6d753606c6ce6"}, + {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:3fcc54cb0c8b811ff66082de1680b4b14cf8a81dce0d4fbf665c2265a81e07a1"}, {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:7f67a1ee819dc4562d444bbafb135832b0b909f81cc90f7aa00260968c9ca1b3"}, {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:4ecbf9c3e19f9562c7fdd462e8d18dd902a47ca046a2e64dba80699f0b6c09b7"}, {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:87ea5ff66d8064301a154b3933ae406b0863402a799b16e4a1d24d9fbbcbe0d3"}, @@ -2070,7 +2478,7 @@ files = [ {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-win_amd64.whl", hash = "sha256:3f215c5daf6a9d7bbed4a0a4f760f3113b10e82ff4c5c44bec20a68c8014f675"}, {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1b617618914cb00bf5c34d4357c37aa15183fa229b24767259657746c9077615"}, {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-macosx_12_0_arm64.whl", hash = "sha256:a6a9ffd280b71ad062eae53ac1659ad86a17f59a0fdc7699fd9be40525153337"}, - {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-manylinux_2_24_aarch64.whl", hash = "sha256:305889baa4043a09e5b76f8e2a51d4ffba44259f6b4c72dec8ca56207d9c6fe1"}, + {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:665f58bfd29b167039f714c6998178d27ccd83984084c286110ef26b230f259f"}, {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:700e4ebb569e59e16a976857c8798aee258dceac7c7d6b50cab63e080058df91"}, {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:e2b4c44b60eadec492926a7270abb100ef9f72798e18743939bdbf037aab8c28"}, {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:e79e5db08739731b0ce4850bed599235d601701d5694c36570a99a0c5ca41a9d"}, @@ -2078,7 +2486,7 @@ files = [ {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-win_amd64.whl", hash = "sha256:56f4252222c067b4ce51ae12cbac231bce32aee1d33fbfc9d17e5b8d6966c312"}, {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:03d1162b6d1df1caa3a4bd27aa51ce17c9afc2046c31b0ad60a0a96ec22f8001"}, {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:bba64af9fa9cebe325a62fa398760f5c7206b215201b0ec825005f1b18b9bccf"}, - {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-manylinux_2_24_aarch64.whl", hash = "sha256:a1a45e0bb052edf6a1d3a93baef85319733a888363938e1fc9924cb00c8df24c"}, + {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:9eb5dee2772b0f704ca2e45b1713e4e5198c18f515b52743576d196348f374d3"}, {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:da09ad1c359a728e112d60116f626cc9f29730ff3e0e7db72b9a2dbc2e4beed5"}, {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:184565012b60405d93838167f425713180b949e9d8dd0bbc7b49f074407c5a8b"}, {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a75879bacf2c987c003368cf14bed0ffe99e8e85acfa6c0bfffc21a090f16880"}, @@ -2343,19 +2751,30 @@ sqlcipher = ["sqlcipher3_binary"] [[package]] name = "tenacity" -version = "9.0.0" +version = "8.5.0" description = "Retry code until it succeeds" optional = false python-versions = ">=3.8" files = [ - {file = "tenacity-9.0.0-py3-none-any.whl", hash = "sha256:93de0c98785b27fcf659856aa9f54bfbd399e29969b0621bc7f762bd441b4539"}, - {file = "tenacity-9.0.0.tar.gz", hash = "sha256:807f37ca97d62aa361264d497b0e31e92b8027044942bfa756160d908320d73b"}, + {file = "tenacity-8.5.0-py3-none-any.whl", hash = "sha256:b594c2a5945830c267ce6b79a166228323ed52718f30302c1359836112346687"}, + {file = "tenacity-8.5.0.tar.gz", hash = "sha256:8bc6c0c8a09b31e6cad13c47afbed1a567518250a9a171418582ed8d9c20ca78"}, ] [package.extras] doc = ["reno", "sphinx"] test = ["pytest", "tornado (>=4.5)", "typeguard"] +[[package]] +name = "toml" +version = "0.10.2" +description = "Python Library for Tom's Obvious, Minimal Language" +optional = false +python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*" +files = [ + {file = "toml-0.10.2-py2.py3-none-any.whl", hash = "sha256:806143ae5bfb6a3c6e736a764057db0e6a0e05e338b5630894a5f779cabb4f9b"}, + {file = "toml-0.10.2.tar.gz", hash = "sha256:b3bda1d108d5dd99f4a20d24d9c348e91c4db7ab1b749200bded2f839ccbe68f"}, +] + [[package]] name = "tracerite" version = "1.1.1" @@ -2648,7 +3067,141 @@ files = [ {file = "websockets-12.0.tar.gz", hash = "sha256:81df9cbcbb6c260de1e007e58c011bfebe2dafc8435107b0537f393dd38c8b1b"}, ] +[[package]] +name = "werkzeug" +version = "3.0.3" +description = "The comprehensive WSGI web application library." +optional = false +python-versions = ">=3.8" +files = [ + {file = "werkzeug-3.0.3-py3-none-any.whl", hash = "sha256:fc9645dc43e03e4d630d23143a04a7f947a9a3b5727cd535fdfe155a17cc48c8"}, + {file = "werkzeug-3.0.3.tar.gz", hash = "sha256:097e5bfda9f0aba8da6b8545146def481d06aa7d3266e7448e2cccf67dd8bd18"}, +] + +[package.dependencies] +MarkupSafe = ">=2.1.1" + +[package.extras] +watchdog = ["watchdog (>=2.3)"] + +[[package]] +name = "wsproto" +version = "1.2.0" +description = "WebSockets state-machine based protocol implementation" +optional = false +python-versions = ">=3.7.0" +files = [ + {file = "wsproto-1.2.0-py3-none-any.whl", hash = "sha256:b9acddd652b585d75b20477888c56642fdade28bdfd3579aa24a4d2c037dd736"}, + {file = "wsproto-1.2.0.tar.gz", hash = "sha256:ad565f26ecb92588a3e43bc3d96164de84cd9902482b130d0ddbaa9664a85065"}, +] + +[package.dependencies] +h11 = ">=0.9.0,<1" + +[[package]] +name = "yarl" +version = "1.9.4" +description = "Yet another URL library" +optional = false +python-versions = ">=3.7" +files = [ + {file = "yarl-1.9.4-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:a8c1df72eb746f4136fe9a2e72b0c9dc1da1cbd23b5372f94b5820ff8ae30e0e"}, + {file = "yarl-1.9.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:a3a6ed1d525bfb91b3fc9b690c5a21bb52de28c018530ad85093cc488bee2dd2"}, + {file = "yarl-1.9.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c38c9ddb6103ceae4e4498f9c08fac9b590c5c71b0370f98714768e22ac6fa66"}, + {file = "yarl-1.9.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d9e09c9d74f4566e905a0b8fa668c58109f7624db96a2171f21747abc7524234"}, + {file = "yarl-1.9.4-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b8477c1ee4bd47c57d49621a062121c3023609f7a13b8a46953eb6c9716ca392"}, + {file = "yarl-1.9.4-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d5ff2c858f5f6a42c2a8e751100f237c5e869cbde669a724f2062d4c4ef93551"}, + {file = "yarl-1.9.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:357495293086c5b6d34ca9616a43d329317feab7917518bc97a08f9e55648455"}, + {file = "yarl-1.9.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:54525ae423d7b7a8ee81ba189f131054defdb122cde31ff17477951464c1691c"}, + {file = "yarl-1.9.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:801e9264d19643548651b9db361ce3287176671fb0117f96b5ac0ee1c3530d53"}, + {file = "yarl-1.9.4-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:e516dc8baf7b380e6c1c26792610230f37147bb754d6426462ab115a02944385"}, + {file = "yarl-1.9.4-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:7d5aaac37d19b2904bb9dfe12cdb08c8443e7ba7d2852894ad448d4b8f442863"}, + {file = "yarl-1.9.4-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:54beabb809ffcacbd9d28ac57b0db46e42a6e341a030293fb3185c409e626b8b"}, + {file = "yarl-1.9.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:bac8d525a8dbc2a1507ec731d2867025d11ceadcb4dd421423a5d42c56818541"}, + {file = "yarl-1.9.4-cp310-cp310-win32.whl", hash = "sha256:7855426dfbddac81896b6e533ebefc0af2f132d4a47340cee6d22cac7190022d"}, + {file = "yarl-1.9.4-cp310-cp310-win_amd64.whl", hash = "sha256:848cd2a1df56ddbffeb375535fb62c9d1645dde33ca4d51341378b3f5954429b"}, + {file = "yarl-1.9.4-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:35a2b9396879ce32754bd457d31a51ff0a9d426fd9e0e3c33394bf4b9036b099"}, + {file = "yarl-1.9.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:4c7d56b293cc071e82532f70adcbd8b61909eec973ae9d2d1f9b233f3d943f2c"}, + {file = "yarl-1.9.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d8a1c6c0be645c745a081c192e747c5de06e944a0d21245f4cf7c05e457c36e0"}, + {file = "yarl-1.9.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4b3c1ffe10069f655ea2d731808e76e0f452fc6c749bea04781daf18e6039525"}, + {file = "yarl-1.9.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:549d19c84c55d11687ddbd47eeb348a89df9cb30e1993f1b128f4685cd0ebbf8"}, + {file = "yarl-1.9.4-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a7409f968456111140c1c95301cadf071bd30a81cbd7ab829169fb9e3d72eae9"}, + {file = "yarl-1.9.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e23a6d84d9d1738dbc6e38167776107e63307dfc8ad108e580548d1f2c587f42"}, + {file = "yarl-1.9.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d8b889777de69897406c9fb0b76cdf2fd0f31267861ae7501d93003d55f54fbe"}, + {file = "yarl-1.9.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:03caa9507d3d3c83bca08650678e25364e1843b484f19986a527630ca376ecce"}, + {file = "yarl-1.9.4-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:4e9035df8d0880b2f1c7f5031f33f69e071dfe72ee9310cfc76f7b605958ceb9"}, + {file = "yarl-1.9.4-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:c0ec0ed476f77db9fb29bca17f0a8fcc7bc97ad4c6c1d8959c507decb22e8572"}, + {file = "yarl-1.9.4-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:ee04010f26d5102399bd17f8df8bc38dc7ccd7701dc77f4a68c5b8d733406958"}, + {file = "yarl-1.9.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:49a180c2e0743d5d6e0b4d1a9e5f633c62eca3f8a86ba5dd3c471060e352ca98"}, + {file = "yarl-1.9.4-cp311-cp311-win32.whl", hash = "sha256:81eb57278deb6098a5b62e88ad8281b2ba09f2f1147c4767522353eaa6260b31"}, + {file = "yarl-1.9.4-cp311-cp311-win_amd64.whl", hash = "sha256:d1d2532b340b692880261c15aee4dc94dd22ca5d61b9db9a8a361953d36410b1"}, + {file = "yarl-1.9.4-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:0d2454f0aef65ea81037759be5ca9947539667eecebca092733b2eb43c965a81"}, + {file = "yarl-1.9.4-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:44d8ffbb9c06e5a7f529f38f53eda23e50d1ed33c6c869e01481d3fafa6b8142"}, + {file = "yarl-1.9.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:aaaea1e536f98754a6e5c56091baa1b6ce2f2700cc4a00b0d49eca8dea471074"}, + {file = "yarl-1.9.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3777ce5536d17989c91696db1d459574e9a9bd37660ea7ee4d3344579bb6f129"}, + {file = "yarl-1.9.4-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9fc5fc1eeb029757349ad26bbc5880557389a03fa6ada41703db5e068881e5f2"}, + {file = "yarl-1.9.4-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ea65804b5dc88dacd4a40279af0cdadcfe74b3e5b4c897aa0d81cf86927fee78"}, + {file = "yarl-1.9.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aa102d6d280a5455ad6a0f9e6d769989638718e938a6a0a2ff3f4a7ff8c62cc4"}, + {file = "yarl-1.9.4-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:09efe4615ada057ba2d30df871d2f668af661e971dfeedf0c159927d48bbeff0"}, + {file = "yarl-1.9.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:008d3e808d03ef28542372d01057fd09168419cdc8f848efe2804f894ae03e51"}, + {file = "yarl-1.9.4-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:6f5cb257bc2ec58f437da2b37a8cd48f666db96d47b8a3115c29f316313654ff"}, + {file = "yarl-1.9.4-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:992f18e0ea248ee03b5a6e8b3b4738850ae7dbb172cc41c966462801cbf62cf7"}, + {file = "yarl-1.9.4-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:0e9d124c191d5b881060a9e5060627694c3bdd1fe24c5eecc8d5d7d0eb6faabc"}, + {file = "yarl-1.9.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:3986b6f41ad22988e53d5778f91855dc0399b043fc8946d4f2e68af22ee9ff10"}, + {file = "yarl-1.9.4-cp312-cp312-win32.whl", hash = "sha256:4b21516d181cd77ebd06ce160ef8cc2a5e9ad35fb1c5930882baff5ac865eee7"}, + {file = "yarl-1.9.4-cp312-cp312-win_amd64.whl", hash = "sha256:a9bd00dc3bc395a662900f33f74feb3e757429e545d831eef5bb280252631984"}, + {file = "yarl-1.9.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:63b20738b5aac74e239622d2fe30df4fca4942a86e31bf47a81a0e94c14df94f"}, + {file = "yarl-1.9.4-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7d7f7de27b8944f1fee2c26a88b4dabc2409d2fea7a9ed3df79b67277644e17"}, + {file = "yarl-1.9.4-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c74018551e31269d56fab81a728f683667e7c28c04e807ba08f8c9e3bba32f14"}, + {file = "yarl-1.9.4-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ca06675212f94e7a610e85ca36948bb8fc023e458dd6c63ef71abfd482481aa5"}, + {file = "yarl-1.9.4-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5aef935237d60a51a62b86249839b51345f47564208c6ee615ed2a40878dccdd"}, + {file = "yarl-1.9.4-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2b134fd795e2322b7684155b7855cc99409d10b2e408056db2b93b51a52accc7"}, + {file = "yarl-1.9.4-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:d25039a474c4c72a5ad4b52495056f843a7ff07b632c1b92ea9043a3d9950f6e"}, + {file = "yarl-1.9.4-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:f7d6b36dd2e029b6bcb8a13cf19664c7b8e19ab3a58e0fefbb5b8461447ed5ec"}, + {file = "yarl-1.9.4-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:957b4774373cf6f709359e5c8c4a0af9f6d7875db657adb0feaf8d6cb3c3964c"}, + {file = "yarl-1.9.4-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:d7eeb6d22331e2fd42fce928a81c697c9ee2d51400bd1a28803965883e13cead"}, + {file = "yarl-1.9.4-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:6a962e04b8f91f8c4e5917e518d17958e3bdee71fd1d8b88cdce74dd0ebbf434"}, + {file = "yarl-1.9.4-cp37-cp37m-win32.whl", hash = "sha256:f3bc6af6e2b8f92eced34ef6a96ffb248e863af20ef4fde9448cc8c9b858b749"}, + {file = "yarl-1.9.4-cp37-cp37m-win_amd64.whl", hash = "sha256:ad4d7a90a92e528aadf4965d685c17dacff3df282db1121136c382dc0b6014d2"}, + {file = "yarl-1.9.4-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:ec61d826d80fc293ed46c9dd26995921e3a82146feacd952ef0757236fc137be"}, + {file = "yarl-1.9.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:8be9e837ea9113676e5754b43b940b50cce76d9ed7d2461df1af39a8ee674d9f"}, + {file = "yarl-1.9.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:bef596fdaa8f26e3d66af846bbe77057237cb6e8efff8cd7cc8dff9a62278bbf"}, + {file = "yarl-1.9.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2d47552b6e52c3319fede1b60b3de120fe83bde9b7bddad11a69fb0af7db32f1"}, + {file = "yarl-1.9.4-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:84fc30f71689d7fc9168b92788abc977dc8cefa806909565fc2951d02f6b7d57"}, + {file = "yarl-1.9.4-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4aa9741085f635934f3a2583e16fcf62ba835719a8b2b28fb2917bb0537c1dfa"}, + {file = "yarl-1.9.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:206a55215e6d05dbc6c98ce598a59e6fbd0c493e2de4ea6cc2f4934d5a18d130"}, + {file = "yarl-1.9.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:07574b007ee20e5c375a8fe4a0789fad26db905f9813be0f9fef5a68080de559"}, + {file = "yarl-1.9.4-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:5a2e2433eb9344a163aced6a5f6c9222c0786e5a9e9cac2c89f0b28433f56e23"}, + {file = "yarl-1.9.4-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:6ad6d10ed9b67a382b45f29ea028f92d25bc0bc1daf6c5b801b90b5aa70fb9ec"}, + {file = "yarl-1.9.4-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:6fe79f998a4052d79e1c30eeb7d6c1c1056ad33300f682465e1b4e9b5a188b78"}, + {file = "yarl-1.9.4-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:a825ec844298c791fd28ed14ed1bffc56a98d15b8c58a20e0e08c1f5f2bea1be"}, + {file = "yarl-1.9.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:8619d6915b3b0b34420cf9b2bb6d81ef59d984cb0fde7544e9ece32b4b3043c3"}, + {file = "yarl-1.9.4-cp38-cp38-win32.whl", hash = "sha256:686a0c2f85f83463272ddffd4deb5e591c98aac1897d65e92319f729c320eece"}, + {file = "yarl-1.9.4-cp38-cp38-win_amd64.whl", hash = "sha256:a00862fb23195b6b8322f7d781b0dc1d82cb3bcac346d1e38689370cc1cc398b"}, + {file = "yarl-1.9.4-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:604f31d97fa493083ea21bd9b92c419012531c4e17ea6da0f65cacdcf5d0bd27"}, + {file = "yarl-1.9.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:8a854227cf581330ffa2c4824d96e52ee621dd571078a252c25e3a3b3d94a1b1"}, + {file = "yarl-1.9.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ba6f52cbc7809cd8d74604cce9c14868306ae4aa0282016b641c661f981a6e91"}, + {file = "yarl-1.9.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a6327976c7c2f4ee6816eff196e25385ccc02cb81427952414a64811037bbc8b"}, + {file = "yarl-1.9.4-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8397a3817d7dcdd14bb266283cd1d6fc7264a48c186b986f32e86d86d35fbac5"}, + {file = "yarl-1.9.4-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e0381b4ce23ff92f8170080c97678040fc5b08da85e9e292292aba67fdac6c34"}, + {file = "yarl-1.9.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:23d32a2594cb5d565d358a92e151315d1b2268bc10f4610d098f96b147370136"}, + {file = "yarl-1.9.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ddb2a5c08a4eaaba605340fdee8fc08e406c56617566d9643ad8bf6852778fc7"}, + {file = "yarl-1.9.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:26a1dc6285e03f3cc9e839a2da83bcbf31dcb0d004c72d0730e755b33466c30e"}, + {file = "yarl-1.9.4-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:18580f672e44ce1238b82f7fb87d727c4a131f3a9d33a5e0e82b793362bf18b4"}, + {file = "yarl-1.9.4-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:29e0f83f37610f173eb7e7b5562dd71467993495e568e708d99e9d1944f561ec"}, + {file = "yarl-1.9.4-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:1f23e4fe1e8794f74b6027d7cf19dc25f8b63af1483d91d595d4a07eca1fb26c"}, + {file = "yarl-1.9.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:db8e58b9d79200c76956cefd14d5c90af54416ff5353c5bfd7cbe58818e26ef0"}, + {file = "yarl-1.9.4-cp39-cp39-win32.whl", hash = "sha256:c7224cab95645c7ab53791022ae77a4509472613e839dab722a72abe5a684575"}, + {file = "yarl-1.9.4-cp39-cp39-win_amd64.whl", hash = "sha256:824d6c50492add5da9374875ce72db7a0733b29c2394890aef23d533106e2b15"}, + {file = "yarl-1.9.4-py3-none-any.whl", hash = "sha256:928cecb0ef9d5a7946eb6ff58417ad2fe9375762382f1bf5c55e61645f2c43ad"}, + {file = "yarl-1.9.4.tar.gz", hash = "sha256:566db86717cf8080b99b58b083b773a908ae40f06681e87e589a976faf8246bf"}, +] + +[package.dependencies] +idna = ">=2.0" +multidict = ">=4.0" + [metadata] lock-version = "2.0" python-versions = "^3.12" -content-hash = "a30121364668554c09657a0659da85a871d7da09c87c76e4026d51b2659dce2b" +content-hash = "45ca91ca0163e88901c9c6e46ac554b7f4bc1398aafa30fd85120a8c7a18f6d2" diff --git a/projects/renku_data_service/pyproject.toml b/projects/renku_data_service/pyproject.toml index 4b21c1001..fd24bfbe0 100644 --- a/projects/renku_data_service/pyproject.toml +++ b/projects/renku_data_service/pyproject.toml @@ -29,6 +29,7 @@ packages = [ { include = "renku_data_services/storage", from = "../../components" }, { include = "renku_data_services/users", from = "../../components" }, { include = "renku_data_services/utils", from = "../../components" }, + { include = "renku_data_services/notebooks", from = "../../components" }, # Note: poetry poly does not detect the migrations as dependencies, but they are. Don't remove these! { include = "renku_data_services/migrations", from = "../../components" }, ] @@ -55,11 +56,18 @@ redis = "^5.0.8" dataclasses-avroschema = "^0.63.0" undictify = "^0.11.3" prometheus-sanic = "^3.0.0" -sentry-sdk = {version = "^2.14.0", extras = ["sanic"]} -authzed = "^0.18.3" +sentry-sdk = {version = "^2.6.0", extras = ["sanic"]} +authzed = "^0.15.0" +cryptography = "^42.0.5" +kubernetes-asyncio = "^30.1.0" +marshmallow = "^3.21.3" +escapism = "^1.0.1" +kr8s = "^0.17.0" +werkzeug = "^3.0.3" # see https://github.com/sanic-org/sanic/issues/2828 for setuptools dependency, remove when not needed anymore setuptools = { version = "^75.1.0" } aiofile = "^3.8.8" +toml = "^0.10.2" [tool.poetry.group.dev.dependencies] pyavro-gen = "^0.3.3" diff --git a/projects/secrets_storage/poetry.lock b/projects/secrets_storage/poetry.lock index b383540bd..9b23ac3e9 100644 --- a/projects/secrets_storage/poetry.lock +++ b/projects/secrets_storage/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.8.2 and should not be changed by hand. [[package]] name = "aiofile" @@ -92,6 +92,20 @@ files = [ [package.extras] test = ["coverage", "mypy", "pexpect", "ruff", "wheel"] +[[package]] +name = "asyncache" +version = "0.3.1" +description = "Helpers to use cachetools with async code." +optional = false +python-versions = ">=3.8,<4.0" +files = [ + {file = "asyncache-0.3.1-py3-none-any.whl", hash = "sha256:ef20a1024d265090dd1e0785c961cf98b9c32cc7d9478973dcf25ac1b80011f5"}, + {file = "asyncache-0.3.1.tar.gz", hash = "sha256:9a1e60a75668e794657489bdea6540ee7e3259c483517b934670db7600bf5035"}, +] + +[package.dependencies] +cachetools = ">=5.2.0,<6.0.0" + [[package]] name = "asyncpg" version = "0.29.0" @@ -657,6 +671,17 @@ files = [ dnspython = ">=2.0.0" idna = ">=2.0.0" +[[package]] +name = "escapism" +version = "1.0.1" +description = "Simple, generic API for escaping strings." +optional = false +python-versions = "*" +files = [ + {file = "escapism-1.0.1-py2.py3-none-any.whl", hash = "sha256:d28f19edc3cb1ffc36fa238956ecc068695477e748f57157c6dde00a6b77f229"}, + {file = "escapism-1.0.1.tar.gz", hash = "sha256:73256bdfb4f22230f0428fc6efecee61cdc4fad531b6f98b849cb9c80711e4ec"}, +] + [[package]] name = "factory-boy" version = "3.3.0" @@ -1048,6 +1073,23 @@ http2 = ["h2 (>=3,<5)"] socks = ["socksio (==1.*)"] zstd = ["zstandard (>=0.18.0)"] +[[package]] +name = "httpx-ws" +version = "0.6.0" +description = "WebSockets support for HTTPX" +optional = false +python-versions = ">=3.8" +files = [ + {file = "httpx_ws-0.6.0-py3-none-any.whl", hash = "sha256:437cfca94519a4e6ae06eb5573192df6c0da85c22b1a19cc1ea0b02b05a51d25"}, + {file = "httpx_ws-0.6.0.tar.gz", hash = "sha256:60218f531fb474a2143af38568f4b7d94ba356780973443365c8e2c87882bb8c"}, +] + +[package.dependencies] +anyio = ">=4" +httpcore = ">=1.0.4" +httpx = ">=0.23.1" +wsproto = "*" + [[package]] name = "idna" version = "3.7" @@ -1116,6 +1158,31 @@ MarkupSafe = ">=2.0" [package.extras] i18n = ["Babel (>=2.7)"] +[[package]] +name = "kr8s" +version = "0.17.2" +description = "A Kubernetes API library" +optional = false +python-versions = ">=3.8" +files = [ + {file = "kr8s-0.17.2-py3-none-any.whl", hash = "sha256:5beba0ccf08c7a2305c0fa8f85fa8d2fe7d3f265872f718489e1bea3162fa91b"}, + {file = "kr8s-0.17.2.tar.gz", hash = "sha256:536d08c3f701365e6ac5ce42c0e8313aa6e6740f92b7077f28209e892af046ab"}, +] + +[package.dependencies] +anyio = ">=3.7.0" +asyncache = ">=0.3.1" +cryptography = ">=35" +httpx = ">=0.24.1" +httpx-ws = ">=0.5.2" +python-box = ">=7.0.1" +python-jsonpath = ">=0.7.1" +pyyaml = ">=6.0" + +[package.extras] +docs = ["furo (>=2023.3.27)", "myst-parser (>=1.0.0)", "sphinx (>=5.3.0)", "sphinx-autoapi (>=2.1.0)", "sphinx-autobuild (>=2021.3.14)", "sphinx-copybutton (>=0.5.1)", "sphinx-design (>=0.3.0)", "sphinxcontrib-mermaid (>=0.8.1)"] +test = ["kubernetes (>=26.1.0)", "kubernetes-asyncio (>=24.2.3)", "kubernetes-validate (>=1.28.0)", "lightkube (>=0.13.0)", "pykube-ng (>=23.6.0)", "pytest (>=7.2.2)", "pytest-asyncio (>=0.20.3)", "pytest-cov (>=4.0.0)", "pytest-kind (>=22.11.1)", "pytest-rerunfailures (>=11.1.2)", "pytest-timeout (>=2.1.0)", "trio (>=0.22.0)", "types-pyyaml (>=6.0)"] + [[package]] name = "kubernetes" version = "31.0.0" @@ -1231,6 +1298,25 @@ files = [ {file = "MarkupSafe-2.1.5.tar.gz", hash = "sha256:d283d37a890ba4c1ae73ffadf8046435c76e7bc2247bbb63c00bd1a709c6544b"}, ] +[[package]] +name = "marshmallow" +version = "3.22.0" +description = "A lightweight library for converting complex datatypes to and from native Python datatypes." +optional = false +python-versions = ">=3.8" +files = [ + {file = "marshmallow-3.22.0-py3-none-any.whl", hash = "sha256:71a2dce49ef901c3f97ed296ae5051135fd3febd2bf43afe0ae9a82143a494d9"}, + {file = "marshmallow-3.22.0.tar.gz", hash = "sha256:4972f529104a220bb8637d595aa4c9762afbe7f7a77d82dc58c1615d70c5823e"}, +] + +[package.dependencies] +packaging = ">=17.0" + +[package.extras] +dev = ["marshmallow[tests]", "pre-commit (>=3.5,<4.0)", "tox"] +docs = ["alabaster (==1.0.0)", "autodocsumm (==0.2.13)", "sphinx (==8.0.2)", "sphinx-issues (==4.1.0)", "sphinx-version-warning (==1.1.2)"] +tests = ["pytest", "pytz", "simplejson"] + [[package]] name = "multidict" version = "6.0.5" @@ -1775,6 +1861,41 @@ dev = ["coverage[toml] (==5.0.4)", "cryptography (>=3.4.0)", "pre-commit", "pyte docs = ["sphinx", "sphinx-rtd-theme", "zope.interface"] tests = ["coverage[toml] (==5.0.4)", "pytest (>=6.0.0,<7.0.0)"] +[[package]] +name = "python-box" +version = "7.2.0" +description = "Advanced Python dictionaries with dot notation access" +optional = false +python-versions = ">=3.8" +files = [ + {file = "python_box-7.2.0-cp310-cp310-macosx_11_0_x86_64.whl", hash = "sha256:6bdeec791e25258351388b3029a3ec5da302bb9ed3be175493c43cdc6c47f5e3"}, + {file = "python_box-7.2.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c449f7b3756a71479fa9c61a86e344ac00ed782a66d7662590f0afa294249d18"}, + {file = "python_box-7.2.0-cp310-cp310-win_amd64.whl", hash = "sha256:6b0d61f182d394106d963232854e495b51edc178faa5316a797be1178212d7e0"}, + {file = "python_box-7.2.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:e2d752de8c1204255bf7b0c814c59ef48293c187a7e9fdcd2fefa28024b72032"}, + {file = "python_box-7.2.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e8a6c35ea356a386077935958a5debcd5b229b9a1b3b26287a52dfe1a7e65d99"}, + {file = "python_box-7.2.0-cp311-cp311-win_amd64.whl", hash = "sha256:32ed58ec4d9e5475efe69f9c7d773dfea90a6a01979e776da93fd2b0a5d04429"}, + {file = "python_box-7.2.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:2a2d664c6a27f7515469b6f1e461935a2038ee130b7d194b4b4db4e85d363618"}, + {file = "python_box-7.2.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e8a5a7365db1aaf600d3e8a2747fcf6833beb5d45439a54318548f02e302e3ec"}, + {file = "python_box-7.2.0-cp312-cp312-win_amd64.whl", hash = "sha256:739f827056ea148cbea3122d4617c994e829b420b1331183d968b175304e3a4f"}, + {file = "python_box-7.2.0-cp38-cp38-macosx_11_0_x86_64.whl", hash = "sha256:2617ef3c3d199f55f63c908f540a4dc14ced9b18533a879e6171c94a6a436f23"}, + {file = "python_box-7.2.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ffd866bed03087b1d8340014da8c3aaae19135767580641df1b4ae6fff6ac0aa"}, + {file = "python_box-7.2.0-cp38-cp38-win_amd64.whl", hash = "sha256:9681f059e7e92bdf20782cd9ea6e533d4711fc7b8c57a462922a025d46add4d0"}, + {file = "python_box-7.2.0-cp39-cp39-macosx_11_0_x86_64.whl", hash = "sha256:6b59b1e2741c9ceecdf5a5bd9b90502c24650e609cd824d434fed3b6f302b7bb"}, + {file = "python_box-7.2.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e23fae825d809ae7520fdeac88bb52be55a3b63992120a00e381783669edf589"}, + {file = "python_box-7.2.0-cp39-cp39-win_amd64.whl", hash = "sha256:573b1abdcb7bd745fa404444f060ee62fc35a74f067181e55dcb43cfe92f2827"}, + {file = "python_box-7.2.0-py3-none-any.whl", hash = "sha256:a3c90832dd772cb0197fdb5bc06123b6e1b846899a1b53d9c39450d27a584829"}, + {file = "python_box-7.2.0.tar.gz", hash = "sha256:551af20bdab3a60a2a21e3435120453c4ca32f7393787c3a5036e1d9fc6a0ede"}, +] + +[package.extras] +all = ["msgpack", "ruamel.yaml (>=0.17)", "toml"] +msgpack = ["msgpack"] +pyyaml = ["PyYAML"] +ruamel-yaml = ["ruamel.yaml (>=0.17)"] +toml = ["toml"] +tomli = ["tomli", "tomli-w"] +yaml = ["ruamel.yaml (>=0.17)"] + [[package]] name = "python-dateutil" version = "2.9.0.post0" @@ -1809,6 +1930,17 @@ autocompletion = ["argcomplete (>=1.10.0,<3)"] graphql = ["gql[httpx] (>=3.5.0,<4)"] yaml = ["PyYaml (>=6.0.1)"] +[[package]] +name = "python-jsonpath" +version = "1.2.0" +description = "JSONPath, JSON Pointer and JSON Patch for Python." +optional = false +python-versions = ">=3.7" +files = [ + {file = "python_jsonpath-1.2.0-py3-none-any.whl", hash = "sha256:3172c7b87098fced1ed84bd3492bd1a19ef1ad41d4f5b8a3e9a147c750ac08b3"}, + {file = "python_jsonpath-1.2.0.tar.gz", hash = "sha256:a29a84ec3ac38e5dcaa62ac2a215de72c4eb60cb1303e10700da980cf7873775"}, +] + [[package]] name = "python-ulid" version = "2.7.0" @@ -1846,7 +1978,6 @@ files = [ {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938"}, {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d"}, {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515"}, - {file = "PyYAML-6.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:326c013efe8048858a6d312ddd31d56e468118ad4cdeda36c719bf5bb6192290"}, {file = "PyYAML-6.0.1-cp310-cp310-win32.whl", hash = "sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924"}, {file = "PyYAML-6.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d"}, {file = "PyYAML-6.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007"}, @@ -1854,16 +1985,8 @@ files = [ {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d"}, {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc"}, {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673"}, - {file = "PyYAML-6.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e7d73685e87afe9f3b36c799222440d6cf362062f78be1013661b00c5c6f678b"}, {file = "PyYAML-6.0.1-cp311-cp311-win32.whl", hash = "sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741"}, {file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"}, - {file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"}, - {file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"}, - {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a08c6f0fe150303c1c6b71ebcd7213c2858041a7e01975da3a99aed1e7a378ef"}, - {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"}, - {file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"}, - {file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"}, - {file = "PyYAML-6.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:0d3304d8c0adc42be59c5f8a4d9e3d7379e6955ad754aa9d6ab7a398b59dd1df"}, {file = "PyYAML-6.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47"}, {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98"}, {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c"}, @@ -1880,7 +2003,6 @@ files = [ {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5"}, {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696"}, {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735"}, - {file = "PyYAML-6.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:49a183be227561de579b4a36efbb21b3eab9651dd81b1858589f796549873dd6"}, {file = "PyYAML-6.0.1-cp38-cp38-win32.whl", hash = "sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206"}, {file = "PyYAML-6.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62"}, {file = "PyYAML-6.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8"}, @@ -1888,7 +2010,6 @@ files = [ {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6"}, {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0"}, {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c"}, - {file = "PyYAML-6.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:04ac92ad1925b2cff1db0cfebffb6ffc43457495c9b3c39d3fcae417d7125dc5"}, {file = "PyYAML-6.0.1-cp39-cp39-win32.whl", hash = "sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c"}, {file = "PyYAML-6.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486"}, {file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"}, @@ -2003,24 +2124,24 @@ python-versions = ">=3.6" files = [ {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:b42169467c42b692c19cf539c38d4602069d8c1505e97b86387fcf7afb766e1d"}, {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-macosx_13_0_arm64.whl", hash = "sha256:07238db9cbdf8fc1e9de2489a4f68474e70dffcb32232db7c08fa61ca0c7c462"}, + {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:d92f81886165cb14d7b067ef37e142256f1c6a90a65cd156b063a43da1708cfd"}, {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:fff3573c2db359f091e1589c3d7c5fc2f86f5bdb6f24252c2d8e539d4e45f412"}, - {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-manylinux_2_24_aarch64.whl", hash = "sha256:aa2267c6a303eb483de8d02db2871afb5c5fc15618d894300b88958f729ad74f"}, {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:840f0c7f194986a63d2c2465ca63af8ccbbc90ab1c6001b1978f05119b5e7334"}, {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:024cfe1fc7c7f4e1aff4a81e718109e13409767e4f871443cbff3dba3578203d"}, {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-win32.whl", hash = "sha256:c69212f63169ec1cfc9bb44723bf2917cbbd8f6191a00ef3410f5a7fe300722d"}, {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-win_amd64.whl", hash = "sha256:cabddb8d8ead485e255fe80429f833172b4cadf99274db39abc080e068cbcc31"}, {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:bef08cd86169d9eafb3ccb0a39edb11d8e25f3dae2b28f5c52fd997521133069"}, {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-macosx_13_0_arm64.whl", hash = "sha256:b16420e621d26fdfa949a8b4b47ade8810c56002f5389970db4ddda51dbff248"}, + {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-manylinux2014_aarch64.whl", hash = "sha256:b5edda50e5e9e15e54a6a8a0070302b00c518a9d32accc2346ad6c984aacd279"}, {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:25c515e350e5b739842fc3228d662413ef28f295791af5e5110b543cf0b57d9b"}, - {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-manylinux_2_24_aarch64.whl", hash = "sha256:1707814f0d9791df063f8c19bb51b0d1278b8e9a2353abbb676c2f685dee6afe"}, {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:46d378daaac94f454b3a0e3d8d78cafd78a026b1d71443f4966c696b48a6d899"}, {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:09b055c05697b38ecacb7ac50bdab2240bfca1a0c4872b0fd309bb07dc9aa3a9"}, {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-win32.whl", hash = "sha256:53a300ed9cea38cf5a2a9b069058137c2ca1ce658a874b79baceb8f892f915a7"}, {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-win_amd64.whl", hash = "sha256:c2a72e9109ea74e511e29032f3b670835f8a59bbdc9ce692c5b4ed91ccf1eedb"}, {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:ebc06178e8821efc9692ea7544aa5644217358490145629914d8020042c24aa1"}, {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-macosx_13_0_arm64.whl", hash = "sha256:edaef1c1200c4b4cb914583150dcaa3bc30e592e907c01117c08b13a07255ec2"}, + {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-manylinux2014_aarch64.whl", hash = "sha256:7048c338b6c86627afb27faecf418768acb6331fc24cfa56c93e8c9780f815fa"}, {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d176b57452ab5b7028ac47e7b3cf644bcfdc8cacfecf7e71759f7f51a59e5c92"}, - {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-manylinux_2_24_aarch64.whl", hash = "sha256:1dc67314e7e1086c9fdf2680b7b6c2be1c0d8e3a8279f2e993ca2a7545fecf62"}, {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:3213ece08ea033eb159ac52ae052a4899b56ecc124bb80020d9bbceeb50258e9"}, {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:aab7fd643f71d7946f2ee58cc88c9b7bfc97debd71dcc93e03e2d174628e7e2d"}, {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-win32.whl", hash = "sha256:5c365d91c88390c8d0a8545df0b5857172824b1c604e867161e6b3d59a827eaa"}, @@ -2028,7 +2149,7 @@ files = [ {file = "ruamel.yaml.clib-0.2.8-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:a5aa27bad2bb83670b71683aae140a1f52b0857a2deff56ad3f6c13a017a26ed"}, {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:c58ecd827313af6864893e7af0a3bb85fd529f862b6adbefe14643947cfe2942"}, {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-macosx_12_0_arm64.whl", hash = "sha256:f481f16baec5290e45aebdc2a5168ebc6d35189ae6fea7a58787613a25f6e875"}, - {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-manylinux_2_24_aarch64.whl", hash = "sha256:77159f5d5b5c14f7c34073862a6b7d34944075d9f93e681638f6d753606c6ce6"}, + {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:3fcc54cb0c8b811ff66082de1680b4b14cf8a81dce0d4fbf665c2265a81e07a1"}, {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:7f67a1ee819dc4562d444bbafb135832b0b909f81cc90f7aa00260968c9ca1b3"}, {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:4ecbf9c3e19f9562c7fdd462e8d18dd902a47ca046a2e64dba80699f0b6c09b7"}, {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:87ea5ff66d8064301a154b3933ae406b0863402a799b16e4a1d24d9fbbcbe0d3"}, @@ -2036,7 +2157,7 @@ files = [ {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-win_amd64.whl", hash = "sha256:3f215c5daf6a9d7bbed4a0a4f760f3113b10e82ff4c5c44bec20a68c8014f675"}, {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1b617618914cb00bf5c34d4357c37aa15183fa229b24767259657746c9077615"}, {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-macosx_12_0_arm64.whl", hash = "sha256:a6a9ffd280b71ad062eae53ac1659ad86a17f59a0fdc7699fd9be40525153337"}, - {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-manylinux_2_24_aarch64.whl", hash = "sha256:305889baa4043a09e5b76f8e2a51d4ffba44259f6b4c72dec8ca56207d9c6fe1"}, + {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:665f58bfd29b167039f714c6998178d27ccd83984084c286110ef26b230f259f"}, {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:700e4ebb569e59e16a976857c8798aee258dceac7c7d6b50cab63e080058df91"}, {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:e2b4c44b60eadec492926a7270abb100ef9f72798e18743939bdbf037aab8c28"}, {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:e79e5db08739731b0ce4850bed599235d601701d5694c36570a99a0c5ca41a9d"}, @@ -2044,7 +2165,7 @@ files = [ {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-win_amd64.whl", hash = "sha256:56f4252222c067b4ce51ae12cbac231bce32aee1d33fbfc9d17e5b8d6966c312"}, {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:03d1162b6d1df1caa3a4bd27aa51ce17c9afc2046c31b0ad60a0a96ec22f8001"}, {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:bba64af9fa9cebe325a62fa398760f5c7206b215201b0ec825005f1b18b9bccf"}, - {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-manylinux_2_24_aarch64.whl", hash = "sha256:a1a45e0bb052edf6a1d3a93baef85319733a888363938e1fc9924cb00c8df24c"}, + {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:9eb5dee2772b0f704ca2e45b1713e4e5198c18f515b52743576d196348f374d3"}, {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:da09ad1c359a728e112d60116f626cc9f29730ff3e0e7db72b9a2dbc2e4beed5"}, {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:184565012b60405d93838167f425713180b949e9d8dd0bbc7b49f074407c5a8b"}, {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a75879bacf2c987c003368cf14bed0ffe99e8e85acfa6c0bfffc21a090f16880"}, @@ -2311,6 +2432,17 @@ files = [ doc = ["reno", "sphinx"] test = ["pytest", "tornado (>=4.5)", "typeguard"] +[[package]] +name = "toml" +version = "0.10.2" +description = "Python Library for Tom's Obvious, Minimal Language" +optional = false +python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*" +files = [ + {file = "toml-0.10.2-py2.py3-none-any.whl", hash = "sha256:806143ae5bfb6a3c6e736a764057db0e6a0e05e338b5630894a5f779cabb4f9b"}, + {file = "toml-0.10.2.tar.gz", hash = "sha256:b3bda1d108d5dd99f4a20d24d9c348e91c4db7ab1b749200bded2f839ccbe68f"}, +] + [[package]] name = "tracerite" version = "1.1.1" @@ -2658,6 +2790,37 @@ files = [ {file = "websockets-12.0.tar.gz", hash = "sha256:81df9cbcbb6c260de1e007e58c011bfebe2dafc8435107b0537f393dd38c8b1b"}, ] +[[package]] +name = "werkzeug" +version = "3.0.4" +description = "The comprehensive WSGI web application library." +optional = false +python-versions = ">=3.8" +files = [ + {file = "werkzeug-3.0.4-py3-none-any.whl", hash = "sha256:02c9eb92b7d6c06f31a782811505d2157837cea66aaede3e217c7c27c039476c"}, + {file = "werkzeug-3.0.4.tar.gz", hash = "sha256:34f2371506b250df4d4f84bfe7b0921e4762525762bbd936614909fe25cd7306"}, +] + +[package.dependencies] +MarkupSafe = ">=2.1.1" + +[package.extras] +watchdog = ["watchdog (>=2.3)"] + +[[package]] +name = "wsproto" +version = "1.2.0" +description = "WebSockets state-machine based protocol implementation" +optional = false +python-versions = ">=3.7.0" +files = [ + {file = "wsproto-1.2.0-py3-none-any.whl", hash = "sha256:b9acddd652b585d75b20477888c56642fdade28bdfd3579aa24a4d2c037dd736"}, + {file = "wsproto-1.2.0.tar.gz", hash = "sha256:ad565f26ecb92588a3e43bc3d96164de84cd9902482b130d0ddbaa9664a85065"}, +] + +[package.dependencies] +h11 = ">=0.9.0,<1" + [metadata] lock-version = "2.0" python-versions = "^3.12" diff --git a/projects/secrets_storage/pyproject.toml b/projects/secrets_storage/pyproject.toml index 38fe9a260..a69bb505a 100644 --- a/projects/secrets_storage/pyproject.toml +++ b/projects/secrets_storage/pyproject.toml @@ -31,6 +31,7 @@ packages = [ { include = "renku_data_services/storage", from = "../../components" }, { include = "renku_data_services/users", from = "../../components" }, { include = "renku_data_services/utils", from = "../../components" }, + { include = "renku_data_services/notebooks", from = "../../components" }, ] [tool.poetry.dependencies] @@ -62,6 +63,11 @@ authzed = "^0.18.3" # see https://github.com/sanic-org/sanic/issues/2828 for setuptools dependency, remove when not needed anymore setuptools = { version = "^75.1.0" } aiofile = "^3.8.8" +escapism = "^1.0.1" +kr8s = "^0.17.2" +marshmallow = "^3.22.0" +toml = "^0.10.2" +werkzeug = "^3.0.4" [tool.poetry.group.dev.dependencies] pyavro-gen = "^0.3.3" diff --git a/pyproject.toml b/pyproject.toml index cb0c13403..771ec8c9d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -61,11 +61,14 @@ prometheus_client = "^0.7.1" kubernetes-asyncio = "^31.1.0" marshmallow = "^3.21.3" escapism = "^1.0.1" -sentry-sdk = { version = "^2.14.0", extras = ["sanic"] } -authzed = "^0.18.3" -cryptography = "^43.0.1" +kr8s = "^0.17.0" +werkzeug = "^3.0.3" +sentry-sdk = { version = "^2.7.1", extras = ["sanic"] } +authzed = "^0.16.0" +cryptography = "^42.0.8" # see https://github.com/sanic-org/sanic/issues/2828 for setuptools dependency, remove when not needed anymore setuptools = { version = "^75.1.0" } +toml = "^0.10.2" aiofiles = "^24.1.0" [tool.poetry.group.dev.dependencies] @@ -87,7 +90,8 @@ fakeredis = "^2.24.1" ruff = "^0.6.7" debugpy = "^1.8.2" pytest-xdist = { version = "^3.5.0", extras = ["psutil"] } -types-requests = "^2.32.0.20240914" +types-requests = "^2.32.0.20240622" +types-toml = "^0.10.8.20240310" types-aiofiles = "^24.1.0.20240626" [build-system] @@ -99,7 +103,11 @@ line-length = 120 target-version = "py311" output-format = "full" include = ["*.py", "*.pyi"] -exclude = ["*/avro_models/*"] +exclude = [ + "*/avro_models/*", + "components/renku_data_services/notebooks/cr_amalthea_session.py", + "components/renku_data_services/notebooks/cr_jupyter_server.py", +] [tool.ruff.format] exclude = ["apispec.py"] @@ -138,6 +146,7 @@ ignore = [ "test/*" = ["D"] "*/versions/*" = ["D", "E", "W"] "apispec.py" = ["D", "E", "W", "I", "UP"] +"components/renku_data_services/notebooks/crs.py" = ["F401"] [tool.ruff.lint.isort] known-first-party = ["renku_data_services"] @@ -147,7 +156,12 @@ convention = "google" [tool.bandit] skips = ["B101", "B603", "B607", "B404"] -exclude_dirs = ["test", ".devcontainer"] +exclude_dirs = [ + "test", + ".devcontainer", + "components/renku_data_services/notebooks/cr_jupyter_server.py", + "components/renku_data_services/notebooks/cr_amalthea_session.py", +] [tool.pytest.ini_options] addopts = "--cov components/ --cov bases/ --cov-report=term-missing -v" @@ -200,6 +214,8 @@ module = [ "renku_data_services.data_api.error_handler", "renku_data_services.namespace.apispec", "renku_data_services.notebooks.apispec", + "renku_data_services.notebooks.cr_amalthea_session", + "renku_data_services.notebooks.cr_jupyter_server", "renku_data_services.platform.apispec", ] ignore_errors = true @@ -224,6 +240,7 @@ module = [ "undictify.*", "urllib3.*", "escapism.*", + "kr8s.*", ] ignore_missing_imports = true diff --git a/server_defaults.json b/server_defaults.json new file mode 100644 index 000000000..1050fbedd --- /dev/null +++ b/server_defaults.json @@ -0,0 +1,8 @@ +{ + "defaultUrl": "/lab", + "cpu_request": 0.5, + "mem_request": "1G", + "disk_request": "1G", + "gpu_request": 0, + "lfs_auto_fetch": false +} diff --git a/server_options.json b/server_options.json new file mode 100644 index 000000000..8a3b58692 --- /dev/null +++ b/server_options.json @@ -0,0 +1,56 @@ +{ + "defaultUrl": { + "order": 1, + "displayName": "Default Environment", + "type": "enum", + "default": "/lab", + "options": [ + "/lab" + ] + }, + "cpu_request": { + "order": 2, + "displayName": "Number of CPUs", + "type": "enum", + "default": 0.5, + "options": [ + 0.5, + 1 + ] + }, + "mem_request": { + "order": 3, + "displayName": "Amount of Memory", + "type": "enum", + "default": "1G", + "options": [ + "1G", + "2G" + ] + }, + "disk_request": { + "order": 4, + "displayName": "Amount of Storage", + "type": "enum", + "default": "1G", + "options": [ + "1G", + "10G" + ] + }, + "gpu_request": { + "order": 5, + "displayName": "Number of GPUs", + "type": "enum", + "default": 0, + "options": [ + 0 + ] + }, + "lfs_auto_fetch": { + "order": 6, + "displayName": "Automatically fetch LFS data", + "type": "boolean", + "default": false + } +} diff --git a/test/bases/renku_data_services/data_api/test_schemathesis.py b/test/bases/renku_data_services/data_api/test_schemathesis.py index b5fa72c6a..075ab6994 100644 --- a/test/bases/renku_data_services/data_api/test_schemathesis.py +++ b/test/bases/renku_data_services/data_api/test_schemathesis.py @@ -73,6 +73,12 @@ def filter_query(context: HookContext, query: dict[str, str] | None) -> bool: ("/oauth2/providers", "POST"), ] +# TODO: RE-enable schemathesis when CI setup for notebooks / sessions is ready +EXCLUDE_PATH_PREFIXES = [ + "/sessions", + "/notebooks", +] + @pytest.mark.schemathesis @pytest.mark.asyncio @@ -84,6 +90,9 @@ async def test_api_schemathesis( admin_headers: dict, requests_statistics: list[timedelta], ) -> None: + for exclude_prefix in EXCLUDE_PATH_PREFIXES: + if case.path.startswith(exclude_prefix): + return req_kwargs = case.as_requests_kwargs(headers=admin_headers) _, res = await sanic_client.request(**req_kwargs) res.request.uri = str(res.url) diff --git a/test/bases/renku_data_services/data_api/test_sessions.py b/test/bases/renku_data_services/data_api/test_sessions.py index 2be42158c..1aeffdb2d 100644 --- a/test/bases/renku_data_services/data_api/test_sessions.py +++ b/test/bases/renku_data_services/data_api/test_sessions.py @@ -1,9 +1,15 @@ """Tests for sessions blueprints.""" +from asyncio import AbstractEventLoop from typing import Any import pytest -from sanic_testing.testing import SanicASGITestClient +from pytest import FixtureRequest +from sanic_testing.testing import SanicASGITestClient, TestingResponse + +from renku_data_services.app_config.config import Config +from renku_data_services.crc.apispec import ResourcePool +from renku_data_services.users.models import UserInfo @pytest.fixture @@ -45,6 +51,33 @@ async def create_session_launcher_helper(name: str, project_id: str, **payload) return create_session_launcher_helper +@pytest.fixture +def launch_session( + sanic_client: SanicASGITestClient, + user_headers: dict, + regular_user: UserInfo, + app_config: Config, + request: FixtureRequest, + event_loop: AbstractEventLoop, +): + async def launch_session_helper( + payload: dict, headers: dict = user_headers, user: UserInfo = regular_user + ) -> TestingResponse: + _, res = await sanic_client.post("/api/data/sessions", headers=headers, json=payload) + assert res.status_code == 201, res.text + assert res.json is not None + assert "name" in res.json + session_id: str = res.json.get("name", "unknown") + + def cleanup(): + event_loop.run_until_complete(app_config.nb_config.k8s_v2_client.delete_server(session_id, user.id)) + + # request.addfinalizer(cleanup) + return res + + return launch_session_helper + + @pytest.mark.asyncio async def test_get_all_session_environments( sanic_client: SanicASGITestClient, unauthorized_headers, create_session_environment @@ -472,3 +505,56 @@ async def test_patch_session_launcher_environment( ) assert res.status_code == 200, res.text assert res.json["environment"]["container_image"] == "nginx:latest" + + +@pytest.fixture +def anonymous_user_headers() -> dict[str, str]: + return {"Renku-Auth-Anon-Id": "some-random-value-1234"} + + +@pytest.mark.asyncio +@pytest.mark.skip(reason="Setup for testing sessions is not done yet.") # TODO: enable in follwup PR +async def test_starting_session_anonymous( + sanic_client: SanicASGITestClient, + create_project, + create_session_launcher, + user_headers, + app_config: Config, + admin_headers, + launch_session, + anonymous_user_headers, +) -> None: + _, res = await sanic_client.post( + "/api/data/resource_pools", + json=ResourcePool.model_validate(app_config.default_resource_pool, from_attributes=True).model_dump( + mode="json", exclude_none=True + ), + headers=admin_headers, + ) + assert res.status_code == 201, res.text + project: dict[str, Any] = await create_project( + "Some project", + visibility="public", + repositories=["https://github.com/SwissDataScienceCenter/renku-data-services"], + ) + launcher: dict[str, Any] = await create_session_launcher( + "Launcher 1", + project_id=project["id"], + environment={ + "container_image": "renku/renkulab-py:3.10-0.23.0-amalthea-sessions-3", + "environment_kind": "CUSTOM", + "name": "test", + "port": 8888, + }, + ) + launcher_id = launcher["id"] + project_id = project["id"] + payload = {"project_id": project_id, "launcher_id": launcher_id} + session_res = await launch_session(payload, headers=anonymous_user_headers) + _, res = await sanic_client.get(f"/api/data/sessions/{session_res.json['name']}", headers=anonymous_user_headers) + assert res.status_code == 200, res.text + assert res.json["name"] == session_res.json["name"] + _, res = await sanic_client.get("/api/data/sessions", headers=anonymous_user_headers) + assert res.status_code == 200, res.text + assert len(res.json) > 0 + assert session_res.json["name"] in [i["name"] for i in res.json] diff --git a/test/components/renku_data_services/data_api/test_config.py b/test/components/renku_data_services/data_api/test_config.py index d0ba3ee17..20bb78542 100644 --- a/test/components/renku_data_services/data_api/test_config.py +++ b/test/components/renku_data_services/data_api/test_config.py @@ -77,6 +77,7 @@ def patch_kc_api(*args, **kwargs): DBConfig._async_engine = None +@pytest.mark.skip(reason="Re-enable when the k8s cluster for CI is fully setup") # TODO: address in followup PR def test_config_no_dummy(config_no_dummy_fixture: conf.Config) -> None: config = config_no_dummy_fixture assert config.authenticator is not None diff --git a/test/components/renku_data_services/db/test_sqlalchemy_storage_repo.py b/test/components/renku_data_services/db/test_sqlalchemy_storage_repo.py index 479232be9..46da44210 100644 --- a/test/components/renku_data_services/db/test_sqlalchemy_storage_repo.py +++ b/test/components/renku_data_services/db/test_sqlalchemy_storage_repo.py @@ -19,48 +19,35 @@ from test.utils import create_storage -def get_user(storage, valid: bool = True): - """Get an api user for a storage.""" - if valid: - user = APIUser( - is_admin=True, - id="abcdefg", - access_token="abcdefg", - full_name="John Doe", # nosec: B106 - ) - user._admin_project_id = storage.get("project_id") - else: - user = APIUser( - is_admin=True, - id="abcdefg", - access_token="abcdefg", - full_name="John Doe", # nosec: B106 - ) - user._admin_project_id = storage.get("project_id") + "0" - user._member_project_id = storage.get("project_id") + "0" - return user +@pytest.fixture() +def user(): + return APIUser( + is_admin=True, + id="abcdefg", + access_token="abcdefg", + full_name="John Doe", # nosec: B106 + ) @given(storage=storage_strat()) @settings(suppress_health_check=[HealthCheck.function_scoped_fixture], deadline=None) @pytest.mark.asyncio -async def test_storage_insert_get(storage: dict[str, Any], app_config: Config) -> None: +async def test_storage_insert_get(storage: dict[str, Any], app_config: Config, user: APIUser) -> None: run_migrations_for_app("common") storage_repo = app_config.storage_repo with contextlib.suppress(ValidationError, errors.ValidationError): - await create_storage(storage, storage_repo, user=get_user(storage)) + await create_storage(storage, storage_repo, user=user) @given(storage=storage_strat(), new_source_path=a_path, new_target_path=a_path) @settings(suppress_health_check=[HealthCheck.function_scoped_fixture], deadline=None) @pytest.mark.asyncio async def test_storage_update_path( - storage: dict[str, Any], new_source_path: str, new_target_path: str, app_config: Config + storage: dict[str, Any], new_source_path: str, new_target_path: str, app_config: Config, user: APIUser ) -> None: run_migrations_for_app("common") storage_repo = app_config.storage_repo try: - user = user = get_user(storage) inserted_storage = await create_storage(storage, storage_repo, user) assert inserted_storage.storage_id is not None @@ -77,11 +64,12 @@ async def test_storage_update_path( @given(storage=storage_strat(), new_config=st.one_of(s3_configuration(), azure_configuration())) @settings(suppress_health_check=[HealthCheck.function_scoped_fixture], deadline=None) @pytest.mark.asyncio -async def test_storage_update_config(storage: dict[str, Any], new_config: dict[str, Any], app_config: Config) -> None: +async def test_storage_update_config( + storage: dict[str, Any], new_config: dict[str, Any], app_config: Config, user: APIUser +) -> None: run_migrations_for_app("common") storage_repo = app_config.storage_repo try: - user = user = get_user(storage) inserted_storage = await create_storage(storage, storage_repo, user) assert inserted_storage.storage_id is not None @@ -97,11 +85,10 @@ async def test_storage_update_config(storage: dict[str, Any], new_config: dict[s @given(storage=storage_strat()) @settings(suppress_health_check=[HealthCheck.function_scoped_fixture], deadline=None) @pytest.mark.asyncio -async def test_storage_delete(storage: dict[str, Any], app_config: Config) -> None: +async def test_storage_delete(storage: dict[str, Any], app_config: Config, user: APIUser) -> None: run_migrations_for_app("common") storage_repo = app_config.storage_repo try: - user = user = get_user(storage) inserted_storage = await create_storage(storage, storage_repo, user) assert inserted_storage.storage_id is not None await storage_repo.delete_storage(storage_id=inserted_storage.storage_id, user=user)