Merge pull request #1652 from DSD-DBS/workspace-mangement
feat: Add workspace management for admins
MoritzWeber0 authored Jul 25, 2024
2 parents d300295 + 117be93 commit f366ae0
Showing 44 changed files with 2,007 additions and 303 deletions.
@@ -8,8 +8,6 @@
Create Date: 2024-02-23 08:53:31.142987
"""
import uuid

import sqlalchemy as sa
from alembic import op

@@ -0,0 +1,60 @@
# SPDX-FileCopyrightText: Copyright DB InfraGO AG and contributors
# SPDX-License-Identifier: Apache-2.0

"""Add workspaces table
Revision ID: a1e59021e0d0
Revises: 49f51db92903
Create Date: 2024-07-17 09:19:57.903328
"""
import sqlalchemy as sa
from alembic import op

# revision identifiers, used by Alembic.
revision = "a1e59021e0d0"
down_revision = "49f51db92903"
branch_labels = None
depends_on = None

t_users = sa.Table(
"users",
sa.MetaData(),
sa.Column("id", sa.Integer()),
sa.Column("name", sa.String()),
)
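# The table stub above declares only the columns this data migration needs to
# read from the existing users table; it is not a full model definition.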


def upgrade():
connection = op.get_bind()
users = connection.execute(sa.select(t_users)).mappings().all()

t_workspaces = op.create_table(
"workspaces",
sa.Column("id", sa.Integer(), autoincrement=True, nullable=False),
sa.Column("pvc_name", sa.String(), nullable=False),
sa.Column("size", sa.String(), nullable=False),
sa.Column("user_id", sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(
["user_id"],
["users.id"],
),
sa.PrimaryKeyConstraint("id", "user_id"),
sa.UniqueConstraint("pvc_name"),
)
op.create_index(
op.f("ix_workspaces_id"), "workspaces", ["id"], unique=False
)

for user in users:
pvc_name = (
"persistent-session-"
+ user["name"].replace("@", "-at-").replace(".", "-dot-").lower()
)
connection.execute(
t_workspaces.insert().values(
pvc_name=pvc_name,
size="20Gi",
user_id=user["id"],
)
)
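For orientation, the data migration creates one workspace row per existing user and derives the legacy PVC name from the user name. A minimal sketch of that transformation, using a made-up user name:

name = "Jane.Doe@example.com"  # hypothetical value, for illustration only
pvc_name = (
    "persistent-session-"
    + name.replace("@", "-at-").replace(".", "-dot-").lower()
)
# pvc_name == "persistent-session-jane-dot-doe-at-example-dot-com"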
136 changes: 101 additions & 35 deletions backend/capellacollab/cli/ws.py
Expand Up @@ -8,13 +8,15 @@
import contextlib
import datetime
import enum
import json
import logging
import pathlib
import select
import sys
import time
import typing as t

import pydantic
import typer
import websocket
from rich import console, pretty, table
@@ -27,11 +29,17 @@
)

MOUNT_PATH = "/workspace"
- PERSISTENT_SESSION_PREFIX = "persistent-session-"
+ LEGACY_WORKSPACE_PREFIX = "persistent-session-"
+ PERSISTENT_WORKSPACE_PREFIX = "workspace-"

LOGGER = logging.getLogger(__name__)


class Sidecar(pydantic.BaseModel):
size: str | None = "20Gi"
annotations: dict[str, str] = {}
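# Illustrative only (all values made up): the Sidecar model records a volume's
# size and annotations next to a backup so that a later restore can recreate
# the PVC with the same settings, e.g.
#   payload = Sidecar(size="30Gi", annotations={"capellacollab/username": "jane"}).model_dump_json()
#   Sidecar.model_validate_json(payload).size  # -> "30Gi"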


def init_kube():
from kubernetes import config

@@ -121,25 +129,42 @@ def volumes(
access_modes = ", ".join(item.spec.access_modes)
age: datetime.datetime = item.metadata.creation_timestamp

- if pvc_name.startswith(PERSISTENT_SESSION_PREFIX):
+ if pvc_name.startswith(PERSISTENT_WORKSPACE_PREFIX):
+ annotations = item.metadata.annotations
+ volume_type = "Persistent user workspace"
+ if pvc_name.startswith(LEGACY_WORKSPACE_PREFIX):
annotations = {
"capellacollab/username": pvc_name.removeprefix(
- PERSISTENT_SESSION_PREFIX
+ LEGACY_WORKSPACE_PREFIX
),
}
- volume_type = "Persistent user workspace"
+ volume_type = "Persistent user workspace (legacy)"
elif pvc_name.startswith("shared-workspace-"):
- annotations = {
- "capellacollab/project_slug": item.metadata.labels.get(
- "capellacollab/project_slug"
- ),
- }
+ annotations = (
+ {
+ "capellacollab/project_slug": (
+ item.metadata.labels.get("capellacollab/project_slug")
+ ),
+ }
+ if item.metadata.labels
+ else {}
+ )
volume_type = "Project-level file-share"

filtered_annotations = (
{
key.removeprefix("capellacollab/"): value
for key, value in annotations.items()
if key.startswith("capellacollab/")
}
if annotations
else {}
)
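# Worked example with illustrative values: annotations of
# {"capellacollab/username": "jane", "pv.kubernetes.io/bind-completed": "yes"}
# reduce to {"username": "jane"} before being rendered in the table.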

tbl.add_row(
pvc_name,
volume_type,
- pretty.Pretty(annotations),
+ pretty.Pretty(filtered_annotations),
capacity,
storage_class,
access_modes,
@@ -160,6 +185,7 @@ def ls(
init_kube()

with pod_for_volume(volume_name, namespace) as pod_name:
wait_for_pod(pod_name, namespace)
for data in stream_tar_from_pod(pod_name, namespace, ["ls", path]):
sys.stdout.write(data.decode("utf-8", "replace"))

@@ -171,11 +197,31 @@ def backup(
out: pathlib.Path = pathlib.Path.cwd(),
):
"""Create a backup of all content in a Kubernetes Persistent Volume."""
from kubernetes import client

init_kube()
core_api = client.CoreV1Api()

targz = out / f"{volume_name}.tar.gz"
sidecar = out / f"{volume_name}.json"

pvc: client.V1PersistentVolumeClaim = (
core_api.read_namespaced_persistent_volume_claim(
name=volume_name, namespace=namespace
)
)

sidecar.write_text(
json.dumps(
Sidecar(
size=pvc.spec.resources.requests.get("storage", None),
annotations=pvc.metadata.annotations,
).model_dump()
)
)
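# The sidecar file ends up next to the tarball; with illustrative values it
# would contain something like
# {"size": "20Gi", "annotations": {"capellacollab/username": "jane"}}.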

with pod_for_volume(volume_name, namespace) as pod_name:
wait_for_pod(pod_name, namespace)
print(f"Downloading workspace volume to '{targz}'")

with targz.open("wb") as outfile:
@@ -190,6 +236,7 @@ def restore(
volume_name: str,
tarfile: t.Annotated[pathlib.Path, typer.Argument(exists=True)],
namespace: t.Annotated[str, NamespaceOption],
sidecar_path: t.Union[pathlib.Path, None] = None,
access_mode: str = "ReadWriteMany",
storage_class_name: str = "persistent-sessions-csi",
user_id: t.Union[str, None] = None,
@@ -206,21 +253,39 @@

init_kube()

sidecar = Sidecar()
if sidecar_path and sidecar_path.exists():
print(f"Found sidecar at '{sidecar_path}'")
sidecar = Sidecar.model_validate_json(sidecar_path.read_text())

create_persistent_volume(
- volume_name, namespace, access_mode, storage_class_name
+ volume_name, namespace, access_mode, storage_class_name, sidecar
)

with pod_for_volume(volume_name, namespace, read_only=False) as pod_name:
wait_for_pod(pod_name, namespace)
print(f"Restoring workspace volume to '{volume_name}'")

with tarfile.open("rb") as infile:
stream_tar_to_pod(pod_name, namespace, infile)

- adjust_directory_permissions(
- pod_name,
- namespace,
- user_id,
- )
+ if user_id:
+ adjust_directory_permissions(
+ pod_name,
+ namespace,
+ user_id,
+ )


def wait_for_pod(
pod_name: str,
namespace: str,
):
timeout = 300 # seconds
while not is_pod_ready(pod_name, namespace) and timeout > 0:
print("Waiting for pod to come online...")
time.sleep(2)
timeout -= 2
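
# Not shown in this diff: is_pod_ready() is defined or imported elsewhere in
# this file. As a rough, hypothetical sketch only, such a helper typically
# checks the pod phase and container readiness via the Kubernetes API:
def _is_pod_ready_sketch(pod_name: str, namespace: str) -> bool:
    from kubernetes import client

    pod = client.CoreV1Api().read_namespaced_pod(pod_name, namespace)
    statuses = pod.status.container_statuses or []
    return pod.status.phase == "Running" and all(s.ready for s in statuses)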


@contextlib.contextmanager
@@ -273,16 +338,26 @@ def pod_for_volume(
),
)

- core_v1_api.create_namespaced_pod(namespace, pod)
+ print(
+ f"Creating pod with name '{volume_name}' in namespace '{namespace}'..."
+ )

- timeout = 300 # seconds
- while not is_pod_ready(volume_name, namespace) and timeout > 0:
- print("Waiting for pod to come online...")
- time.sleep(2)
- timeout -= 2
+ try:
+ core_v1_api.create_namespaced_pod(namespace, pod)
+ except client.exceptions.ApiException as e:
+ if e.status == 409:
+ print(
+ f"The pod with name '{volume_name}' already exists. "
+ "If the Pod is in terminating state, try again later. "
+ "Otherwise, delete it manually."
+ )
+ sys.exit(1)
+ else:
+ raise

yield volume_name

print("Deleting pod...")
core_v1_api.delete_namespaced_pod(volume_name, namespace)


@@ -291,6 +366,7 @@ def create_persistent_volume(
namespace: str,
access_mode: str,
storage_class_name: str,
sidecar: Sidecar,
):
"""Rebuild a PVC, according to the config defined in
`capellacollab/sessions/hooks/persistent_workspace.py`.
Expand All @@ -300,26 +376,17 @@ def create_persistent_volume(

core_v1_api = client.CoreV1Api()

- username = (
- name[len(PERSISTENT_SESSION_PREFIX) :]
- if name.startswith(PERSISTENT_SESSION_PREFIX)
- else name
- )

pvc = client.V1PersistentVolumeClaim(
kind="PersistentVolumeClaim",
api_version="v1",
metadata=client.V1ObjectMeta(
- name=name,
- labels={
- "capellacollab/username": username,
- },
+ name=name, annotations=sidecar.annotations
),
spec=client.V1PersistentVolumeClaimSpec(
access_modes=[access_mode],
storage_class_name=storage_class_name,
resources=client.V1ResourceRequirements(
- requests={"storage": "20Gi"}
+ requests={"storage": sidecar.size}
),
),
)
@@ -337,7 +404,7 @@
def adjust_directory_permissions(
pod_name: str,
namespace: str,
- user_id: str | None,
+ user_id: str,
directory: str = MOUNT_PATH,
):
from kubernetes import client, stream
@@ -420,7 +487,6 @@ def stream_tar_to_pod(pod_name, namespace, infile):
pod_name,
namespace,
command=["tar", "zxf", "-", "-C", "/"],
- # command=["cat"],
stderr=True,
stdin=True,
stdout=True,
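Taken together, backup and restore form a round trip: backup writes a tarball plus a JSON sidecar holding the PVC's size and annotations, and restore recreates the PVC from that sidecar before streaming the tarball back into a helper pod. A minimal sketch of a restore call; every name and value below is illustrative rather than taken from this diff:

import pathlib

from capellacollab.cli import ws

ws.restore(
    volume_name="workspace-42",
    tarfile=pathlib.Path("workspace-42.tar.gz"),
    namespace="collab-sessions",
    sidecar_path=pathlib.Path("workspace-42.json"),
    user_id="1004370000",
)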
1 change: 1 addition & 0 deletions backend/capellacollab/core/database/models.py
@@ -22,3 +22,4 @@
import capellacollab.tools.models
import capellacollab.users.models
import capellacollab.users.tokens.models
import capellacollab.users.workspaces.models
10 changes: 10 additions & 0 deletions backend/capellacollab/events/crud.py
@@ -111,6 +111,16 @@ def get_events(
)


def get_event_by_id(
db: orm.Session, event_id: int
) -> models.DatabaseUserHistoryEvent | None:
return db.execute(
sa.select(models.DatabaseUserHistoryEvent).where(
models.DatabaseUserHistoryEvent.id == event_id
)
).scalar_one_or_none()


def delete_all_events_user_involved_in(db: orm.Session, user_id: int):
db.execute(
sa.delete(models.DatabaseUserHistoryEvent).where(
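The new get_event_by_id helper returns None when no event matches, leaving the caller to decide how to react. A minimal usage sketch, assuming an open orm.Session named db; the FastAPI-style error handling is illustrative:

import fastapi

from capellacollab.events import crud as events_crud

event = events_crud.get_event_by_id(db, event_id=42)
if event is None:
    raise fastapi.HTTPException(status_code=404, detail="Event not found.")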