Skip to content
This repository has been archived by the owner on Apr 26, 2024. It is now read-only.

Some minor performance fixes for task scheduler #16313

Merged
merged 13 commits into from
Sep 14, 2023
1 change: 1 addition & 0 deletions changelog.d/16313.misc
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
Delete device messages asynchronously and in staged batches using the task scheduler.
6 changes: 2 additions & 4 deletions synapse/replication/tcp/handler.py
Original file line number Diff line number Diff line change
Expand Up @@ -672,14 +672,12 @@ def on_LOCK_RELEASED(
cmd.instance_name, cmd.lock_name, cmd.lock_key
)

async def on_NEW_ACTIVE_TASK(
def on_NEW_ACTIVE_TASK(
self, conn: IReplicationConnection, cmd: NewActiveTaskCommand
) -> None:
"""Called when get a new NEW_ACTIVE_TASK command."""
if self._task_scheduler:
task = await self._task_scheduler.get_task(cmd.data)
if task:
await self._task_scheduler._launch_task(task)
self._task_scheduler.launch_task_by_id(cmd.data)

def new_connection(self, connection: IReplicationConnection) -> None:
"""Called when we have a new connection."""
Expand Down
6 changes: 6 additions & 0 deletions synapse/storage/databases/main/task_scheduler.py
Original file line number Diff line number Diff line change
Expand Up @@ -53,6 +53,7 @@ async def get_scheduled_tasks(
resource_id: Optional[str] = None,
statuses: Optional[List[TaskStatus]] = None,
max_timestamp: Optional[int] = None,
limit: Optional[int] = None,
) -> List[ScheduledTask]:
"""Get a list of scheduled tasks from the DB.

Expand All @@ -62,6 +63,7 @@ async def get_scheduled_tasks(
statuses: Limit the returned tasks to the specific statuses
max_timestamp: Limit the returned tasks to the ones that have
a timestamp inferior to the specified one
limit: Only return `limit` number of rows if set.

Returns: a list of `ScheduledTask`, ordered by increasing timestamps
"""
Expand Down Expand Up @@ -94,6 +96,10 @@ def get_scheduled_tasks_txn(txn: LoggingTransaction) -> List[Dict[str, Any]]:

sql = sql + " ORDER BY timestamp"

if limit is not None:
sql += " LIMIT ?"
args.append(limit)

txn.execute(sql, args)
return self.db_pool.cursor_to_dict(txn)

Expand Down
Original file line number	Diff line number	Diff line change
@@ -0,0 +1,16 @@
/* Copyright 2023 The Matrix.org Foundation C.I.C
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

-- Supports the task scheduler's queries on scheduled_tasks, which filter by
-- `timestamp <= ?` and sort with `ORDER BY timestamp` (see
-- get_scheduled_tasks in synapse/storage/databases/main/task_scheduler.py).
CREATE INDEX IF NOT EXISTS scheduled_tasks_timestamp ON scheduled_tasks(timestamp);
100 changes: 70 additions & 30 deletions synapse/util/task_scheduler.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,12 +15,14 @@
import logging
from typing import TYPE_CHECKING, Awaitable, Callable, Dict, List, Optional, Set, Tuple

from prometheus_client import Gauge

from twisted.python.failure import Failure

from synapse.logging.context import nested_logging_context
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.metrics import LaterGauge
from synapse.metrics.background_process_metrics import (
run_as_background_process,
wrap_as_background_process,
)
from synapse.types import JsonMapping, ScheduledTask, TaskStatus
from synapse.util.stringutils import random_string

Expand All @@ -30,12 +32,6 @@
logger = logging.getLogger(__name__)


running_tasks_gauge = Gauge(
"synapse_scheduler_running_tasks",
"The number of concurrent running tasks handled by the TaskScheduler",
)


class TaskScheduler:
"""
This is a simple task scheduler aimed at resumable tasks: usually we use `run_in_background`
Expand Down Expand Up @@ -70,6 +66,8 @@ class TaskScheduler:
# Precision of the scheduler, evaluation of tasks to run will only happen
# every `SCHEDULE_INTERVAL_MS` ms
SCHEDULE_INTERVAL_MS = 1 * 60 * 1000 # 1mn
# How often to clean up old tasks.
CLEANUP_INTERVAL_MS = 30 * 60 * 1000
# Time before a complete or failed task is deleted from the DB
KEEP_TASKS_FOR_MS = 7 * 24 * 60 * 60 * 1000 # 1 week
# Maximum number of tasks that can run at the same time
Expand All @@ -92,14 +90,26 @@ def __init__(self, hs: "HomeServer"):
] = {}
self._run_background_tasks = hs.config.worker.run_background_tasks

# Flag to make sure we only try and launch new tasks once at a time.
self._launching_new_tasks = False

if self._run_background_tasks:
self._clock.looping_call(
run_as_background_process,
self._launch_scheduled_tasks,
TaskScheduler.SCHEDULE_INTERVAL_MS,
)
self._clock.looping_call(
self._clean_scheduled_tasks,
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I also separated in 2 like you did at some point, but I was not sure that no races exist between the 2 so I ended up removing it.
After more thinking I think we should be safe, we are iterating on 2 disjoint set of tasks (active+schedule vs failed+complete) and I don't see any trouble if an active task becomes complete or failed in the middle.

TaskScheduler.SCHEDULE_INTERVAL_MS,
"handle_scheduled_tasks",
self._handle_scheduled_tasks,
)

LaterGauge(
"synapse_scheduler_running_tasks",
"The number of concurrent running tasks handled by the TaskScheduler",
labels=None,
caller=lambda: len(self._running_tasks),
)

def register_action(
self,
function: Callable[
Expand Down Expand Up @@ -234,6 +244,7 @@ async def get_tasks(
resource_id: Optional[str] = None,
statuses: Optional[List[TaskStatus]] = None,
max_timestamp: Optional[int] = None,
limit: Optional[int] = None,
) -> List[ScheduledTask]:
"""Get a list of tasks. Returns all the tasks if no args is provided.

Expand All @@ -247,6 +258,7 @@ async def get_tasks(
statuses: Limit the returned tasks to the specific statuses
max_timestamp: Limit the returned tasks to the ones that have
a timestamp inferior to the specified one
limit: Only return `limit` number of rows if set.

Returns
A list of `ScheduledTask`, ordered by increasing timestamps
Expand All @@ -256,6 +268,7 @@ async def get_tasks(
resource_id=resource_id,
statuses=statuses,
max_timestamp=max_timestamp,
limit=limit,
)

async def delete_task(self, id: str) -> None:
Expand All @@ -273,34 +286,58 @@ async def delete_task(self, id: str) -> None:
raise Exception(f"Task {id} is currently ACTIVE and can't be deleted")
await self._store.delete_scheduled_task(id)

async def _handle_scheduled_tasks(self) -> None:
"""Main loop taking care of launching tasks and cleaning up old ones."""
await self._launch_scheduled_tasks()
await self._clean_scheduled_tasks()
def launch_task_by_id(self, id: str) -> None:
"""Try launching the task with the given ID."""
# Don't bother trying to launch new tasks if we're already at capacity.
if len(self._running_tasks) >= TaskScheduler.MAX_CONCURRENT_RUNNING_TASKS:
return

run_as_background_process("launch_task_by_id", self._launch_task_by_id, id)
MatMaul marked this conversation as resolved.
Show resolved Hide resolved

async def _launch_task_by_id(self, id: str) -> None:
"""Helper async function for `launch_task_by_id`."""
task = await self.get_task(id)
if task:
await self._launch_task(task)

@wrap_as_background_process("launch_scheduled_tasks")
async def _launch_scheduled_tasks(self) -> None:
"""Retrieve and launch scheduled tasks that should be running at that time."""
for task in await self.get_tasks(statuses=[TaskStatus.ACTIVE]):
await self._launch_task(task)
for task in await self.get_tasks(
statuses=[TaskStatus.SCHEDULED], max_timestamp=self._clock.time_msec()
):
await self._launch_task(task)
# Don't bother trying to launch new tasks if we're already at capacity.
if len(self._running_tasks) >= TaskScheduler.MAX_CONCURRENT_RUNNING_TASKS:
return

if self._launching_new_tasks:
return

running_tasks_gauge.set(len(self._running_tasks))
self._launching_new_tasks = True

try:
for task in await self.get_tasks(
statuses=[TaskStatus.ACTIVE], limit=self.MAX_CONCURRENT_RUNNING_TASKS
):
await self._launch_task(task)
for task in await self.get_tasks(
statuses=[TaskStatus.SCHEDULED],
max_timestamp=self._clock.time_msec(),
limit=self.MAX_CONCURRENT_RUNNING_TASKS,
):
await self._launch_task(task)

finally:
self._launching_new_tasks = False

@wrap_as_background_process("clean_scheduled_tasks")
async def _clean_scheduled_tasks(self) -> None:
"""Clean old complete or failed jobs to avoid cluttering the DB."""
now = self._clock.time_msec()
for task in await self._store.get_scheduled_tasks(
statuses=[TaskStatus.FAILED, TaskStatus.COMPLETE]
statuses=[TaskStatus.FAILED, TaskStatus.COMPLETE],
max_timestamp=now - TaskScheduler.KEEP_TASKS_FOR_MS,
):
# FAILED and COMPLETE tasks should never be running
assert task.id not in self._running_tasks
if (
self._clock.time_msec()
> task.timestamp + TaskScheduler.KEEP_TASKS_FOR_MS
):
await self._store.delete_scheduled_task(task.id)
await self._store.delete_scheduled_task(task.id)

async def _launch_task(self, task: ScheduledTask) -> None:
"""Launch a scheduled task now.
Expand Down Expand Up @@ -339,6 +376,9 @@ async def wrapper() -> None:
)
self._running_tasks.remove(task.id)

# Try launch a new task since we've finished with this one.
self._clock.call_later(1, self._launch_scheduled_tasks)
MatMaul marked this conversation as resolved.
Show resolved Hide resolved

if len(self._running_tasks) >= TaskScheduler.MAX_CONCURRENT_RUNNING_TASKS:
return

Expand All @@ -355,4 +395,4 @@ async def wrapper() -> None:

self._running_tasks.add(task.id)
await self.update_task(task.id, status=TaskStatus.ACTIVE)
run_as_background_process(task.action, wrapper)
run_as_background_process(f"task-{task.action}", wrapper)
Loading