Feature/remove-grahpql #2222

Merged
1 change: 0 additions & 1 deletion package/kedro_viz/api/apps.py
@@ -81,7 +81,6 @@ async def favicon():
return FileResponse(_HTML_DIR / "favicon.ico")

@app.get("/")
@app.get("/experiment-tracking")
async def index():
heap_app_id = kedro_telemetry.get_heap_app_id(project_path)
heap_user_identity = kedro_telemetry.get_heap_identity()
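For context, a minimal FastAPI sketch (illustrative names, not the kedro-viz app factory) of what this one-line deletion does: a single coroutine can serve several paths by stacking `@app.get` decorators, so dropping one decorator is all that is needed to stop serving that route.

```python
# Minimal sketch, assuming a plain FastAPI app; names and the response body
# are placeholders, not taken from kedro-viz itself.
from fastapi import FastAPI
from fastapi.responses import HTMLResponse

app = FastAPI()


@app.get("/")
# @app.get("/experiment-tracking")  # removed by this PR; the path no longer resolves
async def index() -> HTMLResponse:
    # kedro-viz renders its index HTML here; a static response stands in.
    return HTMLResponse("<html><body>Kedro-Viz</body></html>")
```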
143 changes: 2 additions & 141 deletions package/kedro_viz/api/graphql/schema.py
@@ -2,160 +2,22 @@

from __future__ import annotations

import json
import logging
from typing import List, Optional

import strawberry
from graphql.validation import NoSchemaIntrospectionCustomRule
from packaging.version import parse
from strawberry import ID
from strawberry.extensions import AddValidationRules
from strawberry.tools import merge_types

from kedro_viz import __version__
from kedro_viz.data_access import data_access_manager
from kedro_viz.integrations.pypi import get_latest_version, is_running_outdated_version

from .serializers import (
format_run,
format_run_metric_data,
format_run_tracking_data,
format_runs,
)
from .types import (
MetricPlotDataset,
Run,
RunInput,
TrackingDataset,
TrackingDatasetGroup,
UpdateRunDetailsFailure,
UpdateRunDetailsResponse,
UpdateRunDetailsSuccess,
Version,
)
from .types import Version

logger = logging.getLogger(__name__)


@strawberry.type
class RunsQuery:
@strawberry.field(
description="Get metadata for specified run_ids from the session store"
)
def run_metadata(self, run_ids: List[ID]) -> List[Run]:
# TODO: this is hacky and should be improved together with reworking the format
# functions.
# Note we keep the order here the same as the queried run_ids.
runs = {
run.id: run
for run in format_runs(
data_access_manager.runs.get_runs_by_ids(run_ids),
data_access_manager.runs.get_user_run_details_by_run_ids(run_ids),
)
}
return [runs[run_id] for run_id in run_ids if run_id in runs]

@strawberry.field(description="Get metadata for all runs from the session store")
def runs_list(self) -> List[Run]:
all_runs = data_access_manager.runs.get_all_runs()
if not all_runs:
return []
all_run_ids = [run.id for run in all_runs]
return format_runs(
all_runs,
data_access_manager.runs.get_user_run_details_by_run_ids(all_run_ids),
)

@strawberry.field(
description="Get tracking datasets for specified group and run_ids"
)
def run_tracking_data(
self,
run_ids: List[ID],
group: TrackingDatasetGroup,
show_diff: Optional[bool] = True,
) -> List[TrackingDataset]:
tracking_dataset_models = data_access_manager.tracking_datasets.get_tracking_datasets_by_group_by_run_ids(
run_ids, group
)
# TODO: this handling of dataset.runs is hacky and should be done by e.g. a
# proper query parameter instead of filtering to right run_ids here.
# Note we keep the order here the same as the queried run_ids.

all_tracking_datasets = []

for dataset in tracking_dataset_models:
runs = {run_id: dataset.runs[run_id] for run_id in run_ids}
formatted_tracking_data = format_run_tracking_data(runs, show_diff)
if formatted_tracking_data:
tracking_data = TrackingDataset(
dataset_name=dataset.dataset_name,
dataset_type=dataset.dataset_type,
data=formatted_tracking_data,
run_ids=run_ids,
)
all_tracking_datasets.append(tracking_data)

return all_tracking_datasets

@strawberry.field(
description="Get metrics data for a limited number of recent runs"
)
def run_metrics_data(self, limit: Optional[int] = 25) -> MetricPlotDataset:
run_ids = [
run.id for run in data_access_manager.runs.get_all_runs(limit_amount=limit)
]
group = TrackingDatasetGroup.METRIC

metric_dataset_models = data_access_manager.tracking_datasets.get_tracking_datasets_by_group_by_run_ids(
run_ids, group
)

metric_data = {}
for dataset in metric_dataset_models:
metric_data[dataset.dataset_name] = dataset.runs

formatted_metric_data = format_run_metric_data(metric_data, run_ids)
return MetricPlotDataset(data=formatted_metric_data)


@strawberry.type
class Mutation:
@strawberry.mutation(description="Update run metadata")
def update_run_details(
self, run_id: ID, run_input: RunInput
) -> UpdateRunDetailsResponse:
run = data_access_manager.runs.get_run_by_id(run_id)
if not run:
return UpdateRunDetailsFailure(
id=run_id, error_message=f"Given run_id: {run_id} doesn't exist"
)
updated_run = format_run(
run.id,
json.loads(run.blob),
data_access_manager.runs.get_user_run_details(run.id),
)

# only update user run title if the input is not empty
if run_input.title is not None and bool(run_input.title.strip()):
updated_run.title = run_input.title

if run_input.bookmark is not None:
updated_run.bookmark = run_input.bookmark

if run_input.notes is not None and bool(run_input.notes.strip()):
updated_run.notes = run_input.notes

data_access_manager.runs.create_or_update_user_run_details(
run_id,
updated_run.title,
updated_run.bookmark,
updated_run.notes,
)
return UpdateRunDetailsSuccess(run=updated_run)


@strawberry.type
class VersionQuery:
@strawberry.field(description="Get the installed and latest Kedro-Viz versions")
@@ -170,8 +32,7 @@ def version(self) -> Version:


schema = strawberry.Schema(
query=(merge_types("Query", (RunsQuery, VersionQuery))),
mutation=Mutation,
query=merge_types("Query", (VersionQuery,)),
extensions=[
AddValidationRules([NoSchemaIntrospectionCustomRule]),
],
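To illustrate the shape of what remains after this diff, here is a self-contained Strawberry sketch of the same pattern: a query type built with `merge_types` from a single `VersionQuery`, with schema introspection blocked by a validation rule. The `Version` fields and the hard-coded values below are placeholders, not kedro-viz's actual resolver, which derives them from `__version__` and PyPI.

```python
# A minimal sketch of the pattern left behind by this PR: a version-only
# GraphQL schema with introspection disabled. Field names and values are
# assumptions for illustration.
import strawberry
from graphql.validation import NoSchemaIntrospectionCustomRule
from strawberry.extensions import AddValidationRules
from strawberry.tools import merge_types


@strawberry.type
class Version:
    installed: str
    latest: str


@strawberry.type
class VersionQuery:
    @strawberry.field(description="Get the installed and latest versions")
    def version(self) -> Version:
        # Placeholder values; the real resolver compares __version__ with PyPI.
        return Version(installed="10.0.0", latest="10.0.0")


schema = strawberry.Schema(
    query=merge_types("Query", (VersionQuery,)),
    extensions=[AddValidationRules([NoSchemaIntrospectionCustomRule])],
)

# The remaining query still works...
print(schema.execute_sync("{ version { installed latest } }").data)
# ...while introspection queries are rejected by the validation rule.
print(schema.execute_sync("{ __schema { types { name } } }").errors)
```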
198 changes: 0 additions & 198 deletions package/kedro_viz/api/graphql/serializers.py

This file was deleted.
