
Commit f721f1b

Revert "Make all process_replication_rows methods async (#13304)" (#13312)

This reverts commit 5d4028f.
erikjohnston authored Jul 18, 2022
1 parent cf5fa50 commit f721f1b
Showing 14 changed files with 25 additions and 40 deletions.
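In effect, each changed file reverts process_replication_rows from a coroutine back to a plain method and drops the await on the chained super() call. A condensed before/after sketch of the pattern repeated in the hunks below (signatures as in the storage classes; the handler variant differs only in parameters, and the ... bodies are elided):

    # Before this revert (as of #13304): a coroutine, awaited up the chain
    async def process_replication_rows(
        self, stream_name: str, instance_name: str, token: int, rows: Iterable[Any]
    ) -> None:
        ...
        return await super().process_replication_rows(
            stream_name, instance_name, token, rows
        )

    # After this revert: an ordinary method again
    def process_replication_rows(
        self, stream_name: str, instance_name: str, token: int, rows: Iterable[Any]
    ) -> None:
        ...
        return super().process_replication_rows(stream_name, instance_name, token, rows)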
1 change: 0 additions & 1 deletion changelog.d/13304.misc

This file was deleted.

4 changes: 2 additions & 2 deletions synapse/handlers/typing.py
@@ -158,7 +158,7 @@ async def _push_remote(self, member: RoomMember, typing: bool) -> None:
         except Exception:
             logger.exception("Error pushing typing notif to remotes")
 
-    async def process_replication_rows(
+    def process_replication_rows(
         self, token: int, rows: List[TypingStream.TypingStreamRow]
     ) -> None:
         """Should be called whenever we receive updates for typing stream."""
@@ -444,7 +444,7 @@ async def get_all_typing_updates(
 
         return rows, current_id, limited
 
-    async def process_replication_rows(
+    def process_replication_rows(
         self, token: int, rows: List[TypingStream.TypingStreamRow]
     ) -> None:
         # The writing process should never get updates from replication.
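The second hunk is truncated by the diff view; judging from the comment, the writer's implementation rejects replication updates outright, presumably along these lines (reconstructed, not part of the diff):

    def process_replication_rows(
        self, token: int, rows: List[TypingStream.TypingStreamRow]
    ) -> None:
        # The writing process should never get updates from replication.
        raise Exception("Typing writer instance got typing info over replication")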
6 changes: 2 additions & 4 deletions synapse/replication/slave/storage/devices.py
@@ -49,7 +49,7 @@ def __init__(
     def get_device_stream_token(self) -> int:
         return self._device_list_id_gen.get_current_token()
 
-    async def process_replication_rows(
+    def process_replication_rows(
         self, stream_name: str, instance_name: str, token: int, rows: Iterable[Any]
     ) -> None:
         if stream_name == DeviceListsStream.NAME:
@@ -59,9 +59,7 @@ async def process_replication_rows(
             self._device_list_id_gen.advance(instance_name, token)
             for row in rows:
                 self._user_signature_stream_cache.entity_has_changed(row.user_id, token)
-        return await super().process_replication_rows(
-            stream_name, instance_name, token, rows
-        )
+        return super().process_replication_rows(stream_name, instance_name, token, rows)
 
     def _invalidate_caches_for_devices(
         self, token: int, rows: Iterable[DeviceListsStream.DeviceListsStreamRow]
6 changes: 2 additions & 4 deletions synapse/replication/slave/storage/push_rule.py
@@ -24,7 +24,7 @@ class SlavedPushRuleStore(SlavedEventStore, PushRulesWorkerStore):
     def get_max_push_rules_stream_id(self) -> int:
         return self._push_rules_stream_id_gen.get_current_token()
 
-    async def process_replication_rows(
+    def process_replication_rows(
         self, stream_name: str, instance_name: str, token: int, rows: Iterable[Any]
     ) -> None:
         if stream_name == PushRulesStream.NAME:
@@ -33,6 +33,4 @@ async def process_replication_rows(
                 self.get_push_rules_for_user.invalidate((row.user_id,))
                 self.get_push_rules_enabled_for_user.invalidate((row.user_id,))
                 self.push_rules_stream_cache.entity_has_changed(row.user_id, token)
-        return await super().process_replication_rows(
-            stream_name, instance_name, token, rows
-        )
+        return super().process_replication_rows(stream_name, instance_name, token, rows)
6 changes: 2 additions & 4 deletions synapse/replication/slave/storage/pushers.py
@@ -40,11 +40,9 @@ def __init__(
     def get_pushers_stream_token(self) -> int:
         return self._pushers_id_gen.get_current_token()
 
-    async def process_replication_rows(
+    def process_replication_rows(
         self, stream_name: str, instance_name: str, token: int, rows: Iterable[Any]
     ) -> None:
         if stream_name == PushersStream.NAME:
             self._pushers_id_gen.advance(instance_name, token)
-        return await super().process_replication_rows(
-            stream_name, instance_name, token, rows
-        )
+        return super().process_replication_rows(stream_name, instance_name, token, rows)
6 changes: 2 additions & 4 deletions synapse/replication/tcp/client.py
@@ -144,15 +144,13 @@ async def on_rdata(
             token: stream token for this batch of rows
             rows: a list of Stream.ROW_TYPE objects as returned by Stream.parse_row.
         """
-        await self.store.process_replication_rows(
-            stream_name, instance_name, token, rows
-        )
+        self.store.process_replication_rows(stream_name, instance_name, token, rows)
 
         if self.send_handler:
             await self.send_handler.process_replication_rows(stream_name, token, rows)
 
         if stream_name == TypingStream.NAME:
-            await self._typing_handler.process_replication_rows(token, rows)
+            self._typing_handler.process_replication_rows(token, rows)
             self.notifier.on_new_event(
                 StreamKeyType.TYPING, token, rooms=[row.room_id for row in rows]
             )
2 changes: 1 addition & 1 deletion synapse/storage/_base.py
@@ -47,7 +47,7 @@ def __init__(
         self.database_engine = database.engine
         self.db_pool = database
 
-    async def process_replication_rows(
+    def process_replication_rows(
         self,
         stream_name: str,
         instance_name: str,
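Only the signature is visible in this hunk; the base implementation in SQLBaseStore is a no-op hook that the store mixins below extend, presumably along these lines (body reconstructed, not shown in the diff):

    def process_replication_rows(
        self,
        stream_name: str,
        instance_name: str,
        token: int,
        rows: Iterable[Any],
    ) -> None:
        # Root of the chain: subclasses advance their ID generators and
        # invalidate caches, then delegate back here via super().
        pass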
4 changes: 2 additions & 2 deletions synapse/storage/databases/main/account_data.py
@@ -414,7 +414,7 @@ async def ignored_users(self, user_id: str) -> FrozenSet[str]:
             )
         )
 
-    async def process_replication_rows(
+    def process_replication_rows(
         self,
         stream_name: str,
         instance_name: str,
@@ -437,7 +437,7 @@ async def process_replication_rows(
                     )
                 self._account_data_stream_cache.entity_has_changed(row.user_id, token)
 
-        await super().process_replication_rows(stream_name, instance_name, token, rows)
+        super().process_replication_rows(stream_name, instance_name, token, rows)
 
     async def add_account_data_to_room(
         self, user_id: str, room_id: str, account_data_type: str, content: JsonDict
4 changes: 2 additions & 2 deletions synapse/storage/databases/main/cache.py
@@ -119,7 +119,7 @@ def get_all_updated_caches_txn(
             "get_all_updated_caches", get_all_updated_caches_txn
         )
 
-    async def process_replication_rows(
+    def process_replication_rows(
         self, stream_name: str, instance_name: str, token: int, rows: Iterable[Any]
     ) -> None:
         if stream_name == EventsStream.NAME:
@@ -154,7 +154,7 @@ async def process_replication_rows(
             else:
                 self._attempt_to_invalidate_cache(row.cache_func, row.keys)
 
-        await super().process_replication_rows(stream_name, instance_name, token, rows)
+        super().process_replication_rows(stream_name, instance_name, token, rows)
 
     def _process_event_stream_row(self, token: int, row: EventsStreamRow) -> None:
         data = row.data
6 changes: 2 additions & 4 deletions synapse/storage/databases/main/deviceinbox.py
@@ -128,7 +128,7 @@ def __init__(
             prefilled_cache=device_outbox_prefill,
         )
 
-    async def process_replication_rows(
+    def process_replication_rows(
         self,
         stream_name: str,
         instance_name: str,
@@ -148,9 +148,7 @@ async def process_replication_rows(
                     self._device_federation_outbox_stream_cache.entity_has_changed(
                         row.entity, token
                     )
-        return await super().process_replication_rows(
-            stream_name, instance_name, token, rows
-        )
+        return super().process_replication_rows(stream_name, instance_name, token, rows)
 
     def get_to_device_stream_token(self) -> int:
         return self._device_inbox_id_gen.get_current_token()
4 changes: 2 additions & 2 deletions synapse/storage/databases/main/events_worker.py
@@ -280,7 +280,7 @@ def get_chain_id_txn(txn: Cursor) -> int:
             id_column="chain_id",
         )
 
-    async def process_replication_rows(
+    def process_replication_rows(
         self,
         stream_name: str,
         instance_name: str,
@@ -292,7 +292,7 @@ async def process_replication_rows(
         elif stream_name == BackfillStream.NAME:
             self._backfill_id_gen.advance(instance_name, -token)
 
-        await super().process_replication_rows(stream_name, instance_name, token, rows)
+        super().process_replication_rows(stream_name, instance_name, token, rows)
 
     async def have_censored_event(self, event_id: str) -> bool:
         """Check if an event has been censored, i.e. if the content of the event has been erased
6 changes: 2 additions & 4 deletions synapse/storage/databases/main/presence.py
@@ -431,7 +431,7 @@ def take_presence_startup_info(self) -> List[UserPresenceState]:
         self._presence_on_startup = []
         return active_on_startup
 
-    async def process_replication_rows(
+    def process_replication_rows(
        self,
        stream_name: str,
        instance_name: str,
@@ -443,6 +443,4 @@ async def process_replication_rows(
         for row in rows:
             self.presence_stream_cache.entity_has_changed(row.user_id, token)
             self._get_presence_for_user.invalidate((row.user_id,))
-        return await super().process_replication_rows(
-            stream_name, instance_name, token, rows
-        )
+        return super().process_replication_rows(stream_name, instance_name, token, rows)
6 changes: 2 additions & 4 deletions synapse/storage/databases/main/receipts.py
@@ -589,7 +589,7 @@ def invalidate_caches_for_receipt(
             "get_unread_event_push_actions_by_room_for_user", (room_id,)
         )
 
-    async def process_replication_rows(
+    def process_replication_rows(
         self,
         stream_name: str,
         instance_name: str,
@@ -604,9 +604,7 @@ async def process_replication_rows(
                 )
                 self._receipts_stream_cache.entity_has_changed(row.room_id, token)
 
-        return await super().process_replication_rows(
-            stream_name, instance_name, token, rows
-        )
+        return super().process_replication_rows(stream_name, instance_name, token, rows)
 
     def _insert_linearized_receipt_txn(
         self,
4 changes: 2 additions & 2 deletions synapse/storage/databases/main/tags.py
@@ -292,7 +292,7 @@ def _update_revision_txn(
             # than the id that the client has.
             pass
 
-    async def process_replication_rows(
+    def process_replication_rows(
         self,
         stream_name: str,
         instance_name: str,
@@ -305,7 +305,7 @@ async def process_replication_rows(
                 self.get_tags_for_user.invalidate((row.user_id,))
                 self._account_data_stream_cache.entity_has_changed(row.user_id, token)
 
-        await super().process_replication_rows(stream_name, instance_name, token, rows)
+        super().process_replication_rows(stream_name, instance_name, token, rows)
 
 
 class TagsStore(TagsWorkerStore):
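The delegate-to-super() shape recurs in every store above because Synapse composes its database store out of many mixin classes: each mixin handles the streams it knows about, then passes the call up the method resolution order. A self-contained toy sketch of that cooperative pattern (hypothetical classes, not from the codebase):

    from typing import Any, Iterable


    class BaseStore:
        def process_replication_rows(
            self, stream_name: str, instance_name: str, token: int, rows: Iterable[Any]
        ) -> None:
            pass  # no-op at the root of the chain


    class PusherMixin(BaseStore):
        def process_replication_rows(
            self, stream_name: str, instance_name: str, token: int, rows: Iterable[Any]
        ) -> None:
            if stream_name == "pushers":
                print("advance pushers id gen")
            return super().process_replication_rows(stream_name, instance_name, token, rows)


    class ReceiptMixin(BaseStore):
        def process_replication_rows(
            self, stream_name: str, instance_name: str, token: int, rows: Iterable[Any]
        ) -> None:
            if stream_name == "receipts":
                print("invalidate receipt caches")
            return super().process_replication_rows(stream_name, instance_name, token, rows)


    class Store(PusherMixin, ReceiptMixin):
        pass


    # Only the receipts mixin reacts, but every mixin sees the call:
    Store().process_replication_rows("receipts", "master", 5, [])  # -> invalidate receipt caches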
