Add cancellation support to ReadWriteLock
#12120
@@ -0,0 +1 @@
+Add support for cancellation to `ReadWriteLock`.
@@ -18,9 +18,10 @@
 import inspect
 import itertools
 import logging
-from contextlib import contextmanager
+from contextlib import asynccontextmanager, contextmanager
 from typing import (
     Any,
+    AsyncIterator,
     Awaitable,
     Callable,
     Collection,
@@ -40,7 +41,7 @@
 )

 import attr
-from typing_extensions import ContextManager
+from typing_extensions import AsyncContextManager

 from twisted.internet import defer
 from twisted.internet.defer import CancelledError
@@ -483,7 +484,7 @@ class ReadWriteLock:

     Example:

-        with await read_write_lock.read("test_key"):
+        async with read_write_lock.read("test_key"):
             # do some work
     """
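Not part of the diff: a hypothetical caller, to illustrate the new API. `read`/`write` now return async context managers directly, and waiting for the lock can raise `CancelledError` if the `Deferred` wrapping the caller is cancelled. The function name and body below are made up for illustration.

    from twisted.internet.defer import CancelledError

    async def update_key(read_write_lock: "ReadWriteLock", key: str) -> None:
        try:
            async with read_write_lock.write(key):
                ...  # do some work while holding the write lock
        except CancelledError:
            # Cancelled while waiting for the lock: the diff below arranges for
            # the lock to be released on our behalf once the previous readers
            # and writers have finished, so later waiters are not blocked.
            raise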
@@ -506,22 +507,24 @@ def __init__(self) -> None:
         # Latest writer queued
         self.key_to_current_writer: Dict[str, defer.Deferred] = {}

-    async def read(self, key: str) -> ContextManager:
+    def read(self, key: str) -> AsyncContextManager:
         new_defer: "defer.Deferred[None]" = defer.Deferred()

         curr_readers = self.key_to_current_readers.setdefault(key, set())
         curr_writer = self.key_to_current_writer.get(key, None)

         curr_readers.add(new_defer)

-        # We wait for the latest writer to finish writing. We can safely ignore
-        # any existing readers... as they're readers.
-        if curr_writer:
-            await make_deferred_yieldable(curr_writer)
-
-        @contextmanager
-        def _ctx_manager() -> Iterator[None]:
+        @asynccontextmanager
+        async def _ctx_manager() -> AsyncIterator[None]:
             try:
+                # We wait for the latest writer to finish writing. We can safely ignore
+                # any existing readers... as they're readers.
+                # May raise a `CancelledError` if the `Deferred` wrapping us is
+                # cancelled. The `Deferred` we are waiting on must not be cancelled,
+                # since we do not own it.
+                if curr_writer:
+                    await make_deferred_yieldable(stop_cancellation(curr_writer))
                 yield
             finally:
                 with PreserveLoggingContext():
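For context, `stop_cancellation` is not shown in this diff. Below is a minimal sketch of the behaviour it is assumed to provide; the real helper may be implemented differently. Cancelling the returned `Deferred` raises `CancelledError` for the caller without cancelling the wrapped `Deferred`, which the caller does not own.

    from twisted.internet import defer

    def stop_cancellation_sketch(deferred: "defer.Deferred") -> "defer.Deferred":
        wrapper: "defer.Deferred" = defer.Deferred()
        # `wrapper` has no canceller, so cancelling it simply errbacks it with
        # `CancelledError`; the cancellation does not propagate to `deferred`,
        # and `deferred`'s eventual result is still relayed to `wrapper`
        # (Twisted suppresses the resulting "already called" state).
        deferred.chainDeferred(wrapper)
        return wrapper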
@@ -530,7 +533,7 @@ def _ctx_manager() -> Iterator[None]:

         return _ctx_manager()

-    async def write(self, key: str) -> ContextManager:
+    def write(self, key: str) -> AsyncContextManager:
         new_defer: "defer.Deferred[None]" = defer.Deferred()

         curr_readers = self.key_to_current_readers.get(key, set())
@@ -541,25 +544,41 @@ async def write(self, key: str) -> ContextManager:
         if curr_writer:
             to_wait_on.append(curr_writer)

-        # We can clear the list of current readers since the new writer waits
+        # We can clear the list of current readers since `new_defer` waits
         # for them to finish.
         curr_readers.clear()
         self.key_to_current_writer[key] = new_defer

-        await make_deferred_yieldable(defer.gatherResults(to_wait_on))
-
-        @contextmanager
-        def _ctx_manager() -> Iterator[None]:
+        @asynccontextmanager
+        async def _ctx_manager() -> AsyncIterator[None]:
+            to_wait_on_defer = defer.gatherResults(to_wait_on)
             try:
+                # Wait for all current readers and the latest writer to finish.
+                # May raise a `CancelledError` if the `Deferred` wrapping us is
+                # cancelled. The `Deferred`s we are waiting on must not be cancelled,
+                # since we do not own them.
+                await make_deferred_yieldable(stop_cancellation(to_wait_on_defer))
                 yield
             finally:
-                with PreserveLoggingContext():
-                    new_defer.callback(None)
-                # `self.key_to_current_writer[key]` may be missing if there was another
-                # writer waiting for us and it completed entirely within the
-                # `new_defer.callback()` call above.
-                if self.key_to_current_writer.get(key) == new_defer:
-                    self.key_to_current_writer.pop(key)
+                def release() -> None:
+                    with PreserveLoggingContext():
+                        new_defer.callback(None)
+                    # `self.key_to_current_writer[key]` may be missing if there was another
+                    # writer waiting for us and it completed entirely within the
+                    # `new_defer.callback()` call above.
+                    if self.key_to_current_writer.get(key) == new_defer:
+                        self.key_to_current_writer.pop(key)
+
+                if to_wait_on_defer.called:
Review comment: could we, instead of all this, do:

    to_wait_on_defer = defer.gatherResults(to_wait_on)
    to_wait_on_defer.addBoth(lambda _: release())
    await make_deferred_yieldable(to_wait_on_defer)
    yield

... possibly with more

Reply:

    try:
        await make_deferred_yieldable(stop_cancellation(to_wait_on_defer))
        yield
    finally:
        to_wait_on_defer.addBoth(lambda _: release())

which I was originally hesitant to do because of re-entrancy in the happy path: ...

The sometimes-extra delay before ...

As an alternative, I'm very tempted by @erikjohnston's suggestion of delaying CancelledErrors:

    try:
        await make_deferred_yieldable(delay_cancellation(to_wait_on_defer))
        yield
    finally:
        release()  # but inline again

Where ...

We'd get better maintainability at the cost of having the crud associated with cancelled requests lingering a little while longer, but maybe that's a worthwhile tradeoff.

Reply: oh, duh. Yes, of course.

Reply: ugh, right, yes, let's avoid that. And yes, ...
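For reference, `delay_cancellation` is only being proposed in this thread and is not part of the diff. The following is a hypothetical sketch (all names are assumptions) of the behaviour described: cancelling the wrapper never cancels the wrapped `Deferred`, and the `CancelledError` is only delivered once the wrapped `Deferred` has resolved.

    from twisted.internet import defer
    from twisted.internet.defer import CancelledError
    from twisted.python.failure import Failure

    def delay_cancellation_sketch(inner: "defer.Deferred") -> "defer.Deferred":
        def handle_cancel(outer: "defer.Deferred") -> None:
            # Record the cancellation, but `pause` the wrapper so that the
            # `CancelledError` is only delivered once `inner` has resolved.
            outer.pause()
            outer.errback(Failure(CancelledError()))

            def unpause(result: object) -> object:
                outer.unpause()
                return result

            inner.addBoth(unpause)

        outer: "defer.Deferred" = defer.Deferred(handle_cancel)

        def relay(result: object) -> None:
            # Pass `inner`'s result through, unless the wrapper has already
            # been errbacked with a `CancelledError` by `handle_cancel`.
            if not outer.called:
                outer.callback(result)

        inner.addBoth(relay)
        return outer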
+                    release()
+                else:
+                    # We don't have the lock yet, probably because we were cancelled
+                    # while waiting for it. We can't call `release()` yet, since
+                    # `new_defer` must only resolve once all previous readers and
+                    # writers have finished.
+                    # NB: `release()` won't have a logcontext in this path.
+                    to_wait_on_defer.addCallback(lambda _: release())
Review comment: presumably we need to do this even if ...

Reply: I'll make the change for the sake of robustness, but all of the Deferreds that represent lock release, and thus also ...
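Background for the robustness point (the suggested change itself is not shown above): in Twisted, `addCallback` only fires on success, while `addBoth` fires on success or failure, so `addBoth` is the safer choice if the awaited `Deferred` could ever fail.

    from twisted.internet import defer

    d: "defer.Deferred" = defer.Deferred()
    # Keep the chain's result intact by returning `res` from each callback.
    d.addCallback(lambda res: print("runs only if `d` succeeds") or res)
    d.addBoth(lambda res: print("runs whether `d` succeeds or fails") or res)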

         return _ctx_manager()
Review comment: I'm not sure of the benefit of using async context managers.

Reply: It mainly benefits the cleanup for the read path, where the cleanup for not-holding-the-lock and holding-the-lock happens to be the same. On the write path we have to handle both cases differently, by deferring the cleanup until we actually acquire the lock.
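A minimal generic sketch of that point (names are made up, not taken from the diff): with `asynccontextmanager`, the wait for the lock sits inside the `try`, so a single `finally` covers both being cancelled while still waiting and releasing after normal use, which is what keeps the read path's cleanup uniform.

    from contextlib import asynccontextmanager
    from typing import AsyncIterator, Awaitable, Callable

    @asynccontextmanager
    async def acquire_sketch(
        wait_for_lock: Callable[[], Awaitable[None]],
        release: Callable[[], None],
    ) -> AsyncIterator[None]:
        try:
            # May raise `CancelledError` before we ever hold the lock...
            await wait_for_lock()
            yield
        finally:
            # ...but this cleanup runs either way, which is fine when (as on
            # the read path) releasing is safe even if we never acquired.
            release()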