diff --git a/CHANGES.md b/CHANGES.md
index 38a0814bbf53..6066a38c5a7b 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,3 +1,103 @@
+Synapse 1.22.0rc1 (2020-10-22)
+==============================
+
+Features
+--------
+
+- Add a configuration option for always using the "userinfo endpoint" for OpenID Connect. This fixes support for some identity providers, e.g. GitLab. Contributed by Benjamin Koch. ([\#7658](https://github.com/matrix-org/synapse/issues/7658))
+- Add the ability for `ThirdPartyEventRules` modules to query and manipulate whether a room is in the public rooms directory. ([\#8292](https://github.com/matrix-org/synapse/issues/8292), [\#8467](https://github.com/matrix-org/synapse/issues/8467))
+- Add support for Olm fallback keys ([MSC2732](https://github.com/matrix-org/matrix-doc/pull/2732)). ([\#8312](https://github.com/matrix-org/synapse/issues/8312), [\#8501](https://github.com/matrix-org/synapse/issues/8501))
+- Add support for running background tasks in a separate worker process. ([\#8369](https://github.com/matrix-org/synapse/issues/8369), [\#8458](https://github.com/matrix-org/synapse/issues/8458), [\#8489](https://github.com/matrix-org/synapse/issues/8489), [\#8513](https://github.com/matrix-org/synapse/issues/8513), [\#8544](https://github.com/matrix-org/synapse/issues/8544), [\#8599](https://github.com/matrix-org/synapse/issues/8599))
+- Add support for device dehydration ([MSC2697](https://github.com/matrix-org/matrix-doc/pull/2697)). ([\#8380](https://github.com/matrix-org/synapse/issues/8380))
+- Add support for [MSC2409](https://github.com/matrix-org/matrix-doc/pull/2409), which allows sending typing, read receipts, and presence events to appservices. ([\#8437](https://github.com/matrix-org/synapse/issues/8437), [\#8590](https://github.com/matrix-org/synapse/issues/8590))
+- Change the default room version to "6", per [MSC2788](https://github.com/matrix-org/matrix-doc/pull/2788). ([\#8461](https://github.com/matrix-org/synapse/issues/8461))
+- Add the ability to send non-membership events into a room via the `ModuleApi`. ([\#8479](https://github.com/matrix-org/synapse/issues/8479))
+- Increase the default upload size limit from 10M to 50M. Contributed by @Akkowicz. ([\#8502](https://github.com/matrix-org/synapse/issues/8502))
+- Add support for modifying event content in `ThirdPartyRules` modules. ([\#8535](https://github.com/matrix-org/synapse/issues/8535), [\#8564](https://github.com/matrix-org/synapse/issues/8564))
+
+
+Bugfixes
+--------
+
+- Fix a longstanding bug where invalid ignored users in account data could break clients. ([\#8454](https://github.com/matrix-org/synapse/issues/8454))
+- Fix a bug where backfilling a room with an event that was missing the `redacts` field would break. ([\#8457](https://github.com/matrix-org/synapse/issues/8457))
+- Don't attempt to respond to some requests if the client has already disconnected. ([\#8465](https://github.com/matrix-org/synapse/issues/8465))
+- Fix message duplication if something goes wrong after persisting the event. ([\#8476](https://github.com/matrix-org/synapse/issues/8476))
+- Fix incremental sync returning an incorrect `prev_batch` token in the timeline section, which, when used to paginate, returned events that were included in the incremental sync. Broken since v0.16.0. ([\#8486](https://github.com/matrix-org/synapse/issues/8486))
+- Expose the `uk.half-shot.msc2778.login.application_service` login type to clients via the login API. This feature was added in v1.21.0, but was not exposed as a potential login flow. ([\#8504](https://github.com/matrix-org/synapse/issues/8504))
+- Fix the error code for `/profile/{userId}/displayname` to be `M_BAD_JSON`. ([\#8517](https://github.com/matrix-org/synapse/issues/8517))
+- Fix a bug introduced in v1.7.0 that could cause Synapse to insert values from non-state `m.room.retention` events into the `room_retention` database table. ([\#8527](https://github.com/matrix-org/synapse/issues/8527))
+- Fix not sending events over federation when using sharded event writers. ([\#8536](https://github.com/matrix-org/synapse/issues/8536))
+- Fix a longstanding bug where email notifications for encrypted messages were blank. ([\#8545](https://github.com/matrix-org/synapse/issues/8545))
+- Fix an increase in the number of `There was no active span...` errors logged when using OpenTracing. ([\#8567](https://github.com/matrix-org/synapse/issues/8567))
+- Fix a bug that prevented errors encountered during execution of the `synapse_port_db` script from being correctly printed. ([\#8585](https://github.com/matrix-org/synapse/issues/8585))
+- Fix appservice transactions to only include a maximum of 100 persistent and 100 ephemeral events. ([\#8606](https://github.com/matrix-org/synapse/issues/8606))
+
+
+Updates to the Docker image
+---------------------------
+
+- Add multi-arch support (arm64, arm/v7) for the Docker images. Contributed by @maquis196. ([\#7921](https://github.com/matrix-org/synapse/issues/7921))
+- Add support for passing command-line args to the Synapse process. Contributed by @samuel-p. ([\#8390](https://github.com/matrix-org/synapse/issues/8390))
+
+
+Improved Documentation
+----------------------
+
+- Update the directions for using the manhole with coroutines. ([\#8462](https://github.com/matrix-org/synapse/issues/8462))
+- Improve the README by adding new shields.io badges. ([\#8493](https://github.com/matrix-org/synapse/issues/8493))
+- Add a note about Docker in manhole.md regarding which IP address to bind to. Contributed by @Maquis196. ([\#8526](https://github.com/matrix-org/synapse/issues/8526))
+- Document the new behaviour of the `allowed_lifetime_min` and `allowed_lifetime_max` settings in the room retention configuration. ([\#8529](https://github.com/matrix-org/synapse/issues/8529))
+
+
+Deprecations and Removals
+-------------------------
+
+- Drop the unused `device_max_stream_id` table. ([\#8589](https://github.com/matrix-org/synapse/issues/8589))
+
+
+Internal Changes
+----------------
+
+- Check for unreachable code with mypy. ([\#8432](https://github.com/matrix-org/synapse/issues/8432))
+- Add a unit test for event persister sharding. ([\#8433](https://github.com/matrix-org/synapse/issues/8433))
+- Allow events to be sent to clients sooner when using sharded event persisters. ([\#8439](https://github.com/matrix-org/synapse/issues/8439), [\#8488](https://github.com/matrix-org/synapse/issues/8488), [\#8496](https://github.com/matrix-org/synapse/issues/8496), [\#8499](https://github.com/matrix-org/synapse/issues/8499))
+- Configure `public_baseurl` when using demo scripts. ([\#8443](https://github.com/matrix-org/synapse/issues/8443))
+- Add SQL logging on queries that happen during startup. ([\#8448](https://github.com/matrix-org/synapse/issues/8448))
+- Speed up unit tests when using PostgreSQL. ([\#8450](https://github.com/matrix-org/synapse/issues/8450))
+- Remove redundant database loads of `stream_ordering` for events we already have. ([\#8452](https://github.com/matrix-org/synapse/issues/8452))
+- Reduce inconsistencies between codepaths for membership and non-membership events. ([\#8463](https://github.com/matrix-org/synapse/issues/8463))
+- Combine `SpamCheckerApi` with the more generic `ModuleApi`. ([\#8464](https://github.com/matrix-org/synapse/issues/8464))
+- Additional testing for `ThirdPartyEventRules`. ([\#8468](https://github.com/matrix-org/synapse/issues/8468))
+- Add `-d` option to `./scripts-dev/lint.sh` to lint files that have changed since the last git commit. ([\#8472](https://github.com/matrix-org/synapse/issues/8472))
+- Unblacklist some sytests. ([\#8474](https://github.com/matrix-org/synapse/issues/8474))
+- Include the log level in the phone home stats. ([\#8477](https://github.com/matrix-org/synapse/issues/8477))
+- Remove outdated Sphinx documentation, scripts and configuration. ([\#8480](https://github.com/matrix-org/synapse/issues/8480))
+- Clarify the error message when plugin config parsers raise an error. ([\#8492](https://github.com/matrix-org/synapse/issues/8492))
+- Remove the deprecated `Handlers` object. ([\#8494](https://github.com/matrix-org/synapse/issues/8494))
+- Fix a thread-safety bug in unit tests. ([\#8497](https://github.com/matrix-org/synapse/issues/8497))
+- Add the user agent to the `user_daily_visits` table. ([\#8503](https://github.com/matrix-org/synapse/issues/8503))
+- Add type hints to various parts of the code base. ([\#8407](https://github.com/matrix-org/synapse/issues/8407), [\#8505](https://github.com/matrix-org/synapse/issues/8505), [\#8507](https://github.com/matrix-org/synapse/issues/8507), [\#8547](https://github.com/matrix-org/synapse/issues/8547), [\#8562](https://github.com/matrix-org/synapse/issues/8562), [\#8609](https://github.com/matrix-org/synapse/issues/8609))
+- Remove unused code from the test framework. ([\#8514](https://github.com/matrix-org/synapse/issues/8514))
+- Apply some internal fixes to the `HomeServer` class to make its code more idiomatic and statically-verifiable. ([\#8515](https://github.com/matrix-org/synapse/issues/8515))
+- Factor out common code between `RoomMemberHandler._locally_reject_invite` and `EventCreationHandler.create_event`. ([\#8537](https://github.com/matrix-org/synapse/issues/8537))
+- Improve database performance by executing more queries without starting transactions. ([\#8542](https://github.com/matrix-org/synapse/issues/8542))
+- Rename `Cache` to `DeferredCache`, to better reflect its purpose. ([\#8548](https://github.com/matrix-org/synapse/issues/8548))
+- Move metric registration code down into `LruCache`. ([\#8561](https://github.com/matrix-org/synapse/issues/8561), [\#8591](https://github.com/matrix-org/synapse/issues/8591))
+- Replace `DeferredCache` with the lighter-weight `LruCache` where possible. ([\#8563](https://github.com/matrix-org/synapse/issues/8563))
+- Add virtualenv-generated folders to `.gitignore`. ([\#8566](https://github.com/matrix-org/synapse/issues/8566))
+- Add `get_immediate` method to `DeferredCache`. ([\#8568](https://github.com/matrix-org/synapse/issues/8568))
+- Fix mypy not properly checking across the codebase; additionally, fix a typing assertion error in `handlers/auth.py`. ([\#8569](https://github.com/matrix-org/synapse/issues/8569))
+- Fix `synmark` benchmark runner. ([\#8571](https://github.com/matrix-org/synapse/issues/8571))
+- Modify `DeferredCache.get()` to return `Deferred`s instead of `ObservableDeferred`s. ([\#8572](https://github.com/matrix-org/synapse/issues/8572))
+- Adjust a protocol-type definition to fit `sqlite3` assertions. ([\#8577](https://github.com/matrix-org/synapse/issues/8577))
+- Support macOS on the `synmark` benchmark runner. ([\#8578](https://github.com/matrix-org/synapse/issues/8578))
+- Update `mypy` static type checker to 0.790. ([\#8583](https://github.com/matrix-org/synapse/issues/8583), [\#8600](https://github.com/matrix-org/synapse/issues/8600))
+- Re-organize the structured logging code to separate the TCP transport handling from the JSON formatting. ([\#8587](https://github.com/matrix-org/synapse/issues/8587))
+- Remove extraneous unittest logging decorators from unit tests. ([\#8592](https://github.com/matrix-org/synapse/issues/8592))
+- Minor optimisations in caching code. ([\#8593](https://github.com/matrix-org/synapse/issues/8593), [\#8594](https://github.com/matrix-org/synapse/issues/8594))
+
+
 Synapse 1.21.2 (2020-10-15)
 ===========================
diff --git a/changelog.d/7658.feature b/changelog.d/7658.feature
deleted file mode 100644
index fbf345988d35..000000000000
--- a/changelog.d/7658.feature
+++ /dev/null
@@ -1 +0,0 @@
-Add a configuration option for always using the "userinfo endpoint" for OpenID Connect. This fixes support for some identity providers, e.g. GitLab. Contributed by Benjamin Koch.
diff --git a/changelog.d/7921.docker b/changelog.d/7921.docker
deleted file mode 100644
index 7cecd67c6a26..000000000000
--- a/changelog.d/7921.docker
+++ /dev/null
@@ -1 +0,0 @@
-Added multi-arch support (arm64,arm/v7) for the docker images. Contributed by @maquis196.
diff --git a/changelog.d/8292.feature b/changelog.d/8292.feature
deleted file mode 100644
index 6d0335e2c827..000000000000
--- a/changelog.d/8292.feature
+++ /dev/null
@@ -1 +0,0 @@
-Allow `ThirdPartyEventRules` modules to query and manipulate whether a room is in the public rooms directory.
\ No newline at end of file
diff --git a/changelog.d/8312.feature b/changelog.d/8312.feature
deleted file mode 100644
index 222a1b032a4d..000000000000
--- a/changelog.d/8312.feature
+++ /dev/null
@@ -1 +0,0 @@
-Add support for olm fallback keys ([MSC2732](https://github.com/matrix-org/matrix-doc/pull/2732)).
\ No newline at end of file
diff --git a/changelog.d/8369.feature b/changelog.d/8369.feature
deleted file mode 100644
index 542993110bc8..000000000000
--- a/changelog.d/8369.feature
+++ /dev/null
@@ -1 +0,0 @@
-Allow running background tasks in a separate worker process.
diff --git a/changelog.d/8380.feature b/changelog.d/8380.feature
deleted file mode 100644
index 05ccea19dce4..000000000000
--- a/changelog.d/8380.feature
+++ /dev/null
@@ -1 +0,0 @@
-Add support for device dehydration ([MSC2697](https://github.com/matrix-org/matrix-doc/pull/2697)).
diff --git a/changelog.d/8390.docker b/changelog.d/8390.docker
deleted file mode 100644
index f71b8e4bbf2d..000000000000
--- a/changelog.d/8390.docker
+++ /dev/null
@@ -1 +0,0 @@
-Add support for passing commandline args to the synapse process. Contributed by @samuel-p.
diff --git a/changelog.d/8407.misc b/changelog.d/8407.misc
deleted file mode 100644
index d37002d75bf1..000000000000
--- a/changelog.d/8407.misc
+++ /dev/null
@@ -1 +0,0 @@
-Add typing information to the device handler.
diff --git a/changelog.d/8432.misc b/changelog.d/8432.misc
deleted file mode 100644
index 01fdad4caf6a..000000000000
--- a/changelog.d/8432.misc
+++ /dev/null
@@ -1 +0,0 @@
-Check for unreachable code with mypy.
diff --git a/changelog.d/8433.misc b/changelog.d/8433.misc deleted file mode 100644 index 05f8b5bbf41e..000000000000 --- a/changelog.d/8433.misc +++ /dev/null @@ -1 +0,0 @@ -Add unit test for event persister sharding. diff --git a/changelog.d/8437.feature b/changelog.d/8437.feature deleted file mode 100644 index 4abcccb326e0..000000000000 --- a/changelog.d/8437.feature +++ /dev/null @@ -1 +0,0 @@ -Implement [MSC2409](https://github.com/matrix-org/matrix-doc/pull/2409) to send typing, read receipts, and presence events to appservices. diff --git a/changelog.d/8439.misc b/changelog.d/8439.misc deleted file mode 100644 index 237cb3b31135..000000000000 --- a/changelog.d/8439.misc +++ /dev/null @@ -1 +0,0 @@ -Allow events to be sent to clients sooner when using sharded event persisters. diff --git a/changelog.d/8443.misc b/changelog.d/8443.misc deleted file mode 100644 index 633598e6b302..000000000000 --- a/changelog.d/8443.misc +++ /dev/null @@ -1 +0,0 @@ -Configure `public_baseurl` when using demo scripts. diff --git a/changelog.d/8448.misc b/changelog.d/8448.misc deleted file mode 100644 index 5ddda1803b9b..000000000000 --- a/changelog.d/8448.misc +++ /dev/null @@ -1 +0,0 @@ -Add SQL logging on queries that happen during startup. diff --git a/changelog.d/8450.misc b/changelog.d/8450.misc deleted file mode 100644 index 4e04c523abef..000000000000 --- a/changelog.d/8450.misc +++ /dev/null @@ -1 +0,0 @@ -Speed up unit tests when using PostgreSQL. diff --git a/changelog.d/8452.misc b/changelog.d/8452.misc deleted file mode 100644 index 8288d91c78b1..000000000000 --- a/changelog.d/8452.misc +++ /dev/null @@ -1 +0,0 @@ -Remove redundant databae loads of stream_ordering for events we already have. diff --git a/changelog.d/8454.bugfix b/changelog.d/8454.bugfix deleted file mode 100644 index c06d490b6f15..000000000000 --- a/changelog.d/8454.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a longstanding bug where invalid ignored users in account data could break clients. diff --git a/changelog.d/8457.bugfix b/changelog.d/8457.bugfix deleted file mode 100644 index 545b06d180c1..000000000000 --- a/changelog.d/8457.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug where backfilling a room with an event that was missing the `redacts` field would break. diff --git a/changelog.d/8458.feature b/changelog.d/8458.feature deleted file mode 100644 index 542993110bc8..000000000000 --- a/changelog.d/8458.feature +++ /dev/null @@ -1 +0,0 @@ -Allow running background tasks in a separate worker process. diff --git a/changelog.d/8461.feature b/changelog.d/8461.feature deleted file mode 100644 index 3665d670e117..000000000000 --- a/changelog.d/8461.feature +++ /dev/null @@ -1 +0,0 @@ -Change default room version to "6", per [MSC2788](https://github.com/matrix-org/matrix-doc/pull/2788). diff --git a/changelog.d/8462.doc b/changelog.d/8462.doc deleted file mode 100644 index cf84db6db7f2..000000000000 --- a/changelog.d/8462.doc +++ /dev/null @@ -1 +0,0 @@ -Update the directions for using the manhole with coroutines. diff --git a/changelog.d/8463.misc b/changelog.d/8463.misc deleted file mode 100644 index 040c9bb90f12..000000000000 --- a/changelog.d/8463.misc +++ /dev/null @@ -1 +0,0 @@ -Reduce inconsistencies between codepaths for membership and non-membership events. diff --git a/changelog.d/8464.misc b/changelog.d/8464.misc deleted file mode 100644 index a552e88f9fc8..000000000000 --- a/changelog.d/8464.misc +++ /dev/null @@ -1 +0,0 @@ -Combine `SpamCheckerApi` with the more generic `ModuleApi`. 
diff --git a/changelog.d/8465.bugfix b/changelog.d/8465.bugfix deleted file mode 100644 index 73f895b26879..000000000000 --- a/changelog.d/8465.bugfix +++ /dev/null @@ -1 +0,0 @@ -Don't attempt to respond to some requests if the client has already disconnected. \ No newline at end of file diff --git a/changelog.d/8467.feature b/changelog.d/8467.feature deleted file mode 100644 index 6d0335e2c827..000000000000 --- a/changelog.d/8467.feature +++ /dev/null @@ -1 +0,0 @@ -Allow `ThirdPartyEventRules` modules to query and manipulate whether a room is in the public rooms directory. \ No newline at end of file diff --git a/changelog.d/8468.misc b/changelog.d/8468.misc deleted file mode 100644 index 32ba991e6421..000000000000 --- a/changelog.d/8468.misc +++ /dev/null @@ -1 +0,0 @@ -Additional testing for `ThirdPartyEventRules`. diff --git a/changelog.d/8472.misc b/changelog.d/8472.misc deleted file mode 100644 index 880f3f5e14fa..000000000000 --- a/changelog.d/8472.misc +++ /dev/null @@ -1 +0,0 @@ -Add `-d` option to `./scripts-dev/lint.sh` to lint files that have changed since the last git commit. \ No newline at end of file diff --git a/changelog.d/8474.misc b/changelog.d/8474.misc deleted file mode 100644 index 65e329a6e3e3..000000000000 --- a/changelog.d/8474.misc +++ /dev/null @@ -1 +0,0 @@ -Unblacklist some sytests. diff --git a/changelog.d/8476.bugfix b/changelog.d/8476.bugfix deleted file mode 100644 index 993a269979af..000000000000 --- a/changelog.d/8476.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix message duplication if something goes wrong after persisting the event. diff --git a/changelog.d/8477.misc b/changelog.d/8477.misc deleted file mode 100644 index 2ee1606b6e32..000000000000 --- a/changelog.d/8477.misc +++ /dev/null @@ -1 +0,0 @@ -Include the log level in the phone home stats. diff --git a/changelog.d/8479.feature b/changelog.d/8479.feature deleted file mode 100644 index 11adeec8a987..000000000000 --- a/changelog.d/8479.feature +++ /dev/null @@ -1 +0,0 @@ -Add the ability to send non-membership events into a room via the `ModuleApi`. \ No newline at end of file diff --git a/changelog.d/8480.misc b/changelog.d/8480.misc deleted file mode 100644 index 81633af2965e..000000000000 --- a/changelog.d/8480.misc +++ /dev/null @@ -1 +0,0 @@ -Remove outdated sphinx documentation, scripts and configuration. \ No newline at end of file diff --git a/changelog.d/8486.bugfix b/changelog.d/8486.bugfix deleted file mode 100644 index 63fc091ba674..000000000000 --- a/changelog.d/8486.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix incremental sync returning an incorrect `prev_batch` token in timeline section, which when used to paginate returned events that were included in the incremental sync. Broken since v0.16.0. diff --git a/changelog.d/8488.misc b/changelog.d/8488.misc deleted file mode 100644 index 237cb3b31135..000000000000 --- a/changelog.d/8488.misc +++ /dev/null @@ -1 +0,0 @@ -Allow events to be sent to clients sooner when using sharded event persisters. diff --git a/changelog.d/8489.feature b/changelog.d/8489.feature deleted file mode 100644 index 22591870a49f..000000000000 --- a/changelog.d/8489.feature +++ /dev/null @@ -1 +0,0 @@ - Allow running background tasks in a separate worker process. diff --git a/changelog.d/8492.misc b/changelog.d/8492.misc deleted file mode 100644 index a344aee791b2..000000000000 --- a/changelog.d/8492.misc +++ /dev/null @@ -1 +0,0 @@ -Clarify error message when plugin config parsers raise an error. 
diff --git a/changelog.d/8493.doc b/changelog.d/8493.doc deleted file mode 100644 index 26797cd99e3b..000000000000 --- a/changelog.d/8493.doc +++ /dev/null @@ -1 +0,0 @@ -Improve readme by adding new shield.io badges. diff --git a/changelog.d/8494.misc b/changelog.d/8494.misc deleted file mode 100644 index 6e56c6b8548c..000000000000 --- a/changelog.d/8494.misc +++ /dev/null @@ -1 +0,0 @@ -Remove the deprecated `Handlers` object. diff --git a/changelog.d/8496.misc b/changelog.d/8496.misc deleted file mode 100644 index 237cb3b31135..000000000000 --- a/changelog.d/8496.misc +++ /dev/null @@ -1 +0,0 @@ -Allow events to be sent to clients sooner when using sharded event persisters. diff --git a/changelog.d/8497.misc b/changelog.d/8497.misc deleted file mode 100644 index 8bc05e8df63b..000000000000 --- a/changelog.d/8497.misc +++ /dev/null @@ -1 +0,0 @@ -Fix a threadsafety bug in unit tests. diff --git a/changelog.d/8499.misc b/changelog.d/8499.misc deleted file mode 100644 index 237cb3b31135..000000000000 --- a/changelog.d/8499.misc +++ /dev/null @@ -1 +0,0 @@ -Allow events to be sent to clients sooner when using sharded event persisters. diff --git a/changelog.d/8501.feature b/changelog.d/8501.feature deleted file mode 100644 index 5220ddd48252..000000000000 --- a/changelog.d/8501.feature +++ /dev/null @@ -1 +0,0 @@ -Add support for olm fallback keys ([MSC2732](https://github.com/matrix-org/matrix-doc/pull/2732)). diff --git a/changelog.d/8502.feature b/changelog.d/8502.feature deleted file mode 100644 index faab8d30422d..000000000000 --- a/changelog.d/8502.feature +++ /dev/null @@ -1 +0,0 @@ -Increase default upload size limit from 10M to 50M. Contributed by @Akkowicz. diff --git a/changelog.d/8503.misc b/changelog.d/8503.misc deleted file mode 100644 index edb1be8aa8e9..000000000000 --- a/changelog.d/8503.misc +++ /dev/null @@ -1 +0,0 @@ -Add user agent to user_daily_visits table. diff --git a/changelog.d/8505.misc b/changelog.d/8505.misc deleted file mode 100644 index 5aa5c113bd78..000000000000 --- a/changelog.d/8505.misc +++ /dev/null @@ -1 +0,0 @@ -Add type hints to various parts of the code base. diff --git a/changelog.d/8507.misc b/changelog.d/8507.misc deleted file mode 100644 index 724da8a9960e..000000000000 --- a/changelog.d/8507.misc +++ /dev/null @@ -1 +0,0 @@ - Add type hints to various parts of the code base. diff --git a/changelog.d/8513.feature b/changelog.d/8513.feature deleted file mode 100644 index 542993110bc8..000000000000 --- a/changelog.d/8513.feature +++ /dev/null @@ -1 +0,0 @@ -Allow running background tasks in a separate worker process. diff --git a/changelog.d/8514.misc b/changelog.d/8514.misc deleted file mode 100644 index 0e7ac4f2207d..000000000000 --- a/changelog.d/8514.misc +++ /dev/null @@ -1 +0,0 @@ -Remove unused code from the test framework. diff --git a/changelog.d/8515.misc b/changelog.d/8515.misc deleted file mode 100644 index 1f8aa292d81d..000000000000 --- a/changelog.d/8515.misc +++ /dev/null @@ -1 +0,0 @@ -Apply some internal fixes to the `HomeServer` class to make its code more idiomatic and statically-verifiable. diff --git a/changelog.d/8517.bugfix b/changelog.d/8517.bugfix deleted file mode 100644 index 1ab623c59fcf..000000000000 --- a/changelog.d/8517.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix error code for `/profile/{userId}/displayname` to be `M_BAD_JSON`. 
diff --git a/changelog.d/8526.doc b/changelog.d/8526.doc deleted file mode 100644 index cbf48680c12f..000000000000 --- a/changelog.d/8526.doc +++ /dev/null @@ -1 +0,0 @@ -Added note about docker in manhole.md regarding which ip address to bind to. Contributed by @Maquis196. diff --git a/changelog.d/8527.bugfix b/changelog.d/8527.bugfix deleted file mode 100644 index 727e0ba2992a..000000000000 --- a/changelog.d/8527.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug introduced in v1.7.0 that could cause Synapse to insert values from non-state `m.room.retention` events into the `room_retention` database table. diff --git a/changelog.d/8529.doc b/changelog.d/8529.doc deleted file mode 100644 index 6e710e6527bb..000000000000 --- a/changelog.d/8529.doc +++ /dev/null @@ -1 +0,0 @@ -Document the new behaviour of the `allowed_lifetime_min` and `allowed_lifetime_max` settings in the room retention configuration. diff --git a/changelog.d/8535.feature b/changelog.d/8535.feature deleted file mode 100644 index 45342e66ad7d..000000000000 --- a/changelog.d/8535.feature +++ /dev/null @@ -1 +0,0 @@ -Support modifying event content in `ThirdPartyRules` modules. diff --git a/changelog.d/8536.bugfix b/changelog.d/8536.bugfix deleted file mode 100644 index 8d238cc00853..000000000000 --- a/changelog.d/8536.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix not sending events over federation when using sharded event writers. diff --git a/changelog.d/8537.misc b/changelog.d/8537.misc deleted file mode 100644 index 26309b5b9393..000000000000 --- a/changelog.d/8537.misc +++ /dev/null @@ -1 +0,0 @@ -Factor out common code between `RoomMemberHandler._locally_reject_invite` and `EventCreationHandler.create_event`. diff --git a/changelog.d/8542.misc b/changelog.d/8542.misc deleted file mode 100644 index 63149fd9b982..000000000000 --- a/changelog.d/8542.misc +++ /dev/null @@ -1 +0,0 @@ -Improve database performance by executing more queries without starting transactions. diff --git a/changelog.d/8544.feature b/changelog.d/8544.feature deleted file mode 100644 index 542993110bc8..000000000000 --- a/changelog.d/8544.feature +++ /dev/null @@ -1 +0,0 @@ -Allow running background tasks in a separate worker process. diff --git a/changelog.d/8547.misc b/changelog.d/8547.misc deleted file mode 100644 index fafb1c8347b2..000000000000 --- a/changelog.d/8547.misc +++ /dev/null @@ -1 +0,0 @@ -Enable mypy type checking for `synapse.util.caches`. diff --git a/changelog.d/8548.misc b/changelog.d/8548.misc deleted file mode 100644 index fba10bd731ca..000000000000 --- a/changelog.d/8548.misc +++ /dev/null @@ -1 +0,0 @@ -Rename `Cache` to `DeferredCache`, to better reflect its purpose. diff --git a/changelog.d/8561.misc b/changelog.d/8561.misc deleted file mode 100644 index a40dedfa8e6b..000000000000 --- a/changelog.d/8561.misc +++ /dev/null @@ -1 +0,0 @@ -Move metric registration code down into `LruCache`. diff --git a/changelog.d/8562.misc b/changelog.d/8562.misc deleted file mode 100644 index ebdbddb50048..000000000000 --- a/changelog.d/8562.misc +++ /dev/null @@ -1 +0,0 @@ -Add type annotations for `LruCache`. diff --git a/changelog.d/8564.feature b/changelog.d/8564.feature deleted file mode 100644 index 45342e66ad7d..000000000000 --- a/changelog.d/8564.feature +++ /dev/null @@ -1 +0,0 @@ -Support modifying event content in `ThirdPartyRules` modules. 
diff --git a/changelog.d/8566.misc b/changelog.d/8566.misc
deleted file mode 100644
index 453cf48ffa9c..000000000000
--- a/changelog.d/8566.misc
+++ /dev/null
@@ -1 +0,0 @@
-Add virtualenv-generated folders to `.gitignore`.
\ No newline at end of file
diff --git a/changelog.d/8580.bugfix b/changelog.d/8580.bugfix
new file mode 100644
index 000000000000..31734fd97d71
--- /dev/null
+++ b/changelog.d/8580.bugfix
@@ -0,0 +1 @@
+Fix a bug where Synapse would blindly forward bad responses from federation to clients when retrieving profile information.
diff --git a/changelog.d/8614.misc b/changelog.d/8614.misc
new file mode 100644
index 000000000000..1bf9ea08f082
--- /dev/null
+++ b/changelog.d/8614.misc
@@ -0,0 +1 @@
+Don't instantiate Requester directly.
diff --git a/changelog.d/8615.misc b/changelog.d/8615.misc
new file mode 100644
index 000000000000..79fa7b7ff84d
--- /dev/null
+++ b/changelog.d/8615.misc
@@ -0,0 +1 @@
+Add type hints for `RegistrationStore`.
diff --git a/changelog.d/8621.misc b/changelog.d/8621.misc
new file mode 100644
index 000000000000..5720b665fed9
--- /dev/null
+++ b/changelog.d/8621.misc
@@ -0,0 +1 @@
+Remove unused OPTIONS handlers.
diff --git a/changelog.d/8627.bugfix b/changelog.d/8627.bugfix
new file mode 100644
index 000000000000..143cf95f9230
--- /dev/null
+++ b/changelog.d/8627.bugfix
@@ -0,0 +1 @@
+Fix email notifications for invites without local state.
diff --git a/changelog.d/8628.bugfix b/changelog.d/8628.bugfix
new file mode 100644
index 000000000000..1316136ca274
--- /dev/null
+++ b/changelog.d/8628.bugfix
@@ -0,0 +1 @@
+Fix handling of invalid group IDs to return a 400 rather than log an exception and return a 500.
diff --git a/changelog.d/8632.bugfix b/changelog.d/8632.bugfix
new file mode 100644
index 000000000000..7d834aa2e2e1
--- /dev/null
+++ b/changelog.d/8632.bugfix
@@ -0,0 +1 @@
+Fix handling of User-Agent headers that are invalid UTF-8, which caused user agents of users to not get correctly recorded.
diff --git a/changelog.d/8634.misc b/changelog.d/8634.misc
new file mode 100644
index 000000000000..c4f74ba7c946
--- /dev/null
+++ b/changelog.d/8634.misc
@@ -0,0 +1 @@
+Correct Synapse's PyPI package name in the OpenID Connect installation instructions.
\ No newline at end of file
diff --git a/changelog.d/8639.misc b/changelog.d/8639.misc
new file mode 100644
index 000000000000..20a213df3930
--- /dev/null
+++ b/changelog.d/8639.misc
@@ -0,0 +1 @@
+Fix typos and spelling errors in the code.
diff --git a/changelog.d/8640.misc b/changelog.d/8640.misc
new file mode 100644
index 000000000000..cf6023f7835c
--- /dev/null
+++ b/changelog.d/8640.misc
@@ -0,0 +1 @@
+Reduce the number of OpenTracing spans started.
diff --git a/changelog.d/8644.misc b/changelog.d/8644.misc
new file mode 100644
index 000000000000..87f2b72924b2
--- /dev/null
+++ b/changelog.d/8644.misc
@@ -0,0 +1 @@
+Add field `total` to device list in admin API.
\ No newline at end of file
diff --git a/changelog.d/8648.bugfix b/changelog.d/8648.bugfix
new file mode 100644
index 000000000000..aa71ad0ff2e8
--- /dev/null
+++ b/changelog.d/8648.bugfix
@@ -0,0 +1 @@
+Fix a bug introduced in v1.22.0rc1 which would cause ephemeral events to not be sent to appservices.
\ No newline at end of file
diff --git a/changelog.d/8654.bugfix b/changelog.d/8654.bugfix
new file mode 100644
index 000000000000..91d3265b7f82
--- /dev/null
+++ b/changelog.d/8654.bugfix
@@ -0,0 +1 @@
+Fix `user_daily_visits` to not have duplicate rows for user agents. Broke in v1.22.0rc1.
diff --git a/changelog.d/8656.bugfix b/changelog.d/8656.bugfix new file mode 100644 index 000000000000..d6415e8282af --- /dev/null +++ b/changelog.d/8656.bugfix @@ -0,0 +1 @@ +Fix a bug introduced in v1.22.0rc1 where presence events were not properly passed to application services. diff --git a/docs/admin_api/user_admin_api.rst b/docs/admin_api/user_admin_api.rst index 7ca902faba25..0f3d99c82698 100644 --- a/docs/admin_api/user_admin_api.rst +++ b/docs/admin_api/user_admin_api.rst @@ -375,7 +375,8 @@ A response body like the following is returned: "last_seen_ts": 1474491775025, "user_id": "" } - ] + ], + "total": 2 } **Parameters** @@ -400,6 +401,8 @@ The following fields are returned in the JSON response body: devices was last seen. (May be a few minutes out of date, for efficiency reasons). - ``user_id`` - Owner of device. +- ``total`` - Total number of user's devices. + Delete multiple devices ------------------ Deletes the given devices for a specific ``user_id``, and invalidates diff --git a/docs/openid.md b/docs/openid.md index 48736819995a..a836bb76dbfd 100644 --- a/docs/openid.md +++ b/docs/openid.md @@ -37,7 +37,7 @@ as follows: provided by `matrix.org` so no further action is needed. * If you installed Synapse into a virtualenv, run `/path/to/env/bin/pip - install synapse[oidc]` to install the necessary dependencies. + install matrix-synapse[oidc]` to install the necessary dependencies. * For other installation mechanisms, see the documentation provided by the maintainer. diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index 061226ea6fc1..07f162856869 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -1886,7 +1886,7 @@ sso: # and issued at ("iat") claims are validated if present. # # Note that this is a non-standard login type and client support is -# expected to be non-existant. +# expected to be non-existent. # # See https://github.com/matrix-org/synapse/blob/master/docs/jwt.md. # @@ -2402,7 +2402,7 @@ spam_checker: # # Options for the rules include: # -# user_id: Matches agaisnt the creator of the alias +# user_id: Matches against the creator of the alias # room_id: Matches against the room ID being published # alias: Matches against any current local or canonical aliases # associated with the room @@ -2448,7 +2448,7 @@ opentracing: # This is a list of regexes which are matched against the server_name of the # homeserver. # - # By defult, it is empty, so no servers are matched. + # By default, it is empty, so no servers are matched. # #homeserver_whitelist: # - ".*" diff --git a/docs/sample_log_config.yaml b/docs/sample_log_config.yaml index 55a48a9ed622..e26657f9fe77 100644 --- a/docs/sample_log_config.yaml +++ b/docs/sample_log_config.yaml @@ -59,7 +59,7 @@ root: # then write them to a file. # # Replace "buffer" with "console" to log to stderr instead. (Note that you'll - # also need to update the configuation for the `twisted` logger above, in + # also need to update the configuration for the `twisted` logger above, in # this case.) 
# handlers: [buffer] diff --git a/mypy.ini b/mypy.ini index b5db54ee3b93..59d9074c3b06 100644 --- a/mypy.ini +++ b/mypy.ini @@ -15,8 +15,9 @@ files = synapse/events/builder.py, synapse/events/spamcheck.py, synapse/federation, - synapse/handlers/appservice.py, + synapse/handlers/_base.py, synapse/handlers/account_data.py, + synapse/handlers/appservice.py, synapse/handlers/auth.py, synapse/handlers/cas_handler.py, synapse/handlers/deactivate_account.py, @@ -32,6 +33,7 @@ files = synapse/handlers/pagination.py, synapse/handlers/password_policy.py, synapse/handlers/presence.py, + synapse/handlers/profile.py, synapse/handlers/read_marker.py, synapse/handlers/room.py, synapse/handlers/room_member.py, @@ -55,6 +57,7 @@ files = synapse/spam_checker_api, synapse/state, synapse/storage/databases/main/events.py, + synapse/storage/databases/main/registration.py, synapse/storage/databases/main/stream.py, synapse/storage/databases/main/ui_auth.py, synapse/storage/database.py, diff --git a/scripts/synapse_port_db b/scripts/synapse_port_db index 2d0b59ab534e..6c7664ad4a42 100755 --- a/scripts/synapse_port_db +++ b/scripts/synapse_port_db @@ -22,6 +22,7 @@ import logging import sys import time import traceback +from typing import Optional import yaml @@ -152,7 +153,7 @@ IGNORED_TABLES = { # Error returned by the run function. Used at the top-level part of the script to # handle errors and return codes. -end_error = None +end_error = None # type: Optional[str] # The exec_info for the error, if any. If error is defined but not exec_info the script # will show only the error message without the stacktrace, if exec_info is defined but # not the error then the script will show nothing outside of what's printed in the run @@ -635,7 +636,7 @@ class Porter(object): self.progress.done() except Exception as e: global end_error_exec_info - end_error = e + end_error = str(e) end_error_exec_info = sys.exc_info() logger.exception("") finally: diff --git a/setup.py b/setup.py index 08843fe2a3e4..2f4a3170d268 100755 --- a/setup.py +++ b/setup.py @@ -102,6 +102,8 @@ def exec_file(path_segments): "flake8", ] +CONDITIONAL_REQUIREMENTS["mypy"] = ["mypy==0.790", "mypy-zope==0.2.8"] + # Dependencies which are exclusively required by unit test code. This is # NOT a list of all modules that are necessary to run the unit tests. # Tests assume that all optional dependencies are installed. 
diff --git a/synapse/__init__.py b/synapse/__init__.py index 83b8e4897f3c..c9d53e767a1b 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -48,7 +48,7 @@ except ImportError: pass -__version__ = "1.21.2" +__version__ = "1.22.0rc1" if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)): # We import here so that we don't have to install a bunch of deps when diff --git a/synapse/api/auth.py b/synapse/api/auth.py index bff87fabde75..526cb58c5f6a 100644 --- a/synapse/api/auth.py +++ b/synapse/api/auth.py @@ -184,9 +184,7 @@ async def get_user_by_req( """ try: ip_addr = self.hs.get_ip_from_request(request) - user_agent = request.requestHeaders.getRawHeaders( - b"User-Agent", default=[b""] - )[0].decode("ascii", "surrogateescape") + user_agent = request.get_user_agent("") access_token = self.get_access_token_from_request(request) diff --git a/synapse/appservice/scheduler.py b/synapse/appservice/scheduler.py index ad3c408519ee..58291afc2231 100644 --- a/synapse/appservice/scheduler.py +++ b/synapse/appservice/scheduler.py @@ -60,6 +60,13 @@ logger = logging.getLogger(__name__) +# Maximum number of events to provide in an AS transaction. +MAX_PERSISTENT_EVENTS_PER_TRANSACTION = 100 + +# Maximum number of ephemeral events to provide in an AS transaction. +MAX_EPHEMERAL_EVENTS_PER_TRANSACTION = 100 + + class ApplicationServiceScheduler: """ Public facing API for this module. Does the required DI to tie the components together. This also serves as the "event_pool", which in this @@ -136,10 +143,17 @@ async def _send_request(self, service: ApplicationService): self.requests_in_flight.add(service.id) try: while True: - events = self.queued_events.pop(service.id, []) - ephemeral = self.queued_ephemeral.pop(service.id, []) + all_events = self.queued_events.get(service.id, []) + events = all_events[:MAX_PERSISTENT_EVENTS_PER_TRANSACTION] + del all_events[:MAX_PERSISTENT_EVENTS_PER_TRANSACTION] + + all_events_ephemeral = self.queued_ephemeral.get(service.id, []) + ephemeral = all_events_ephemeral[:MAX_EPHEMERAL_EVENTS_PER_TRANSACTION] + del all_events_ephemeral[:MAX_EPHEMERAL_EVENTS_PER_TRANSACTION] + if not events and not ephemeral: return + try: await self.txn_ctrl.send(service, events, ephemeral) except Exception: diff --git a/synapse/config/jwt_config.py b/synapse/config/jwt_config.py index 3252ad9e7f41..f30330abb6d5 100644 --- a/synapse/config/jwt_config.py +++ b/synapse/config/jwt_config.py @@ -63,7 +63,7 @@ def generate_config_section(self, **kwargs): # and issued at ("iat") claims are validated if present. # # Note that this is a non-standard login type and client support is - # expected to be non-existant. + # expected to be non-existent. # # See https://github.com/matrix-org/synapse/blob/master/docs/jwt.md. # diff --git a/synapse/config/logger.py b/synapse/config/logger.py index 13d6f6a3ea68..6b7be28aee7d 100644 --- a/synapse/config/logger.py +++ b/synapse/config/logger.py @@ -105,7 +105,7 @@ # then write them to a file. # # Replace "buffer" with "console" to log to stderr instead. (Note that you'll - # also need to update the configuation for the `twisted` logger above, in + # also need to update the configuration for the `twisted` logger above, in # this case.) 
# handlers: [buffer] diff --git a/synapse/config/registration.py b/synapse/config/registration.py index d7e3690a32fb..b0a77a2e431e 100644 --- a/synapse/config/registration.py +++ b/synapse/config/registration.py @@ -143,7 +143,7 @@ def read_config(self, config, **kwargs): RoomCreationPreset.TRUSTED_PRIVATE_CHAT, } - # Pull the creater/inviter from the configuration, this gets used to + # Pull the creator/inviter from the configuration, this gets used to # send invites for invite-only rooms. mxid_localpart = config.get("auto_join_mxid_localpart") self.auto_join_user_id = None diff --git a/synapse/config/room_directory.py b/synapse/config/room_directory.py index 6de1f9d1038a..92e1b6752827 100644 --- a/synapse/config/room_directory.py +++ b/synapse/config/room_directory.py @@ -99,7 +99,7 @@ def generate_config_section(self, config_dir_path, server_name, **kwargs): # # Options for the rules include: # - # user_id: Matches agaisnt the creator of the alias + # user_id: Matches against the creator of the alias # room_id: Matches against the room ID being published # alias: Matches against any current local or canonical aliases # associated with the room diff --git a/synapse/config/tracer.py b/synapse/config/tracer.py index 8be134611388..0c1a854f096b 100644 --- a/synapse/config/tracer.py +++ b/synapse/config/tracer.py @@ -67,7 +67,7 @@ def generate_config_section(cls, **kwargs): # This is a list of regexes which are matched against the server_name of the # homeserver. # - # By defult, it is empty, so no servers are matched. + # By default, it is empty, so no servers are matched. # #homeserver_whitelist: # - ".*" diff --git a/synapse/crypto/context_factory.py b/synapse/crypto/context_factory.py index 79668a402e14..57fd426e8700 100644 --- a/synapse/crypto/context_factory.py +++ b/synapse/crypto/context_factory.py @@ -149,7 +149,7 @@ def get_options(self, host: bytes): return SSLClientConnectionCreator(host, ssl_context, should_verify) def creatorForNetloc(self, hostname, port): - """Implements the IPolicyForHTTPS interace so that this can be passed + """Implements the IPolicyForHTTPS interface so that this can be passed directly to agents. """ return self.get_options(hostname) diff --git a/synapse/events/__init__.py b/synapse/events/__init__.py index 65df62107f78..e203206865ae 100644 --- a/synapse/events/__init__.py +++ b/synapse/events/__init__.py @@ -59,7 +59,7 @@ def __get__(self, instance, owner=None): # # To exclude the KeyError from the traceback, we explicitly # 'raise from e1.__context__' (which is better than 'raise from None', - # becuase that would omit any *earlier* exceptions). + # because that would omit any *earlier* exceptions). # raise AttributeError( "'%s' has no '%s' property" % (type(instance), self.key) diff --git a/synapse/events/utils.py b/synapse/events/utils.py index 355cbe05f13b..14f7f1156f38 100644 --- a/synapse/events/utils.py +++ b/synapse/events/utils.py @@ -180,7 +180,7 @@ def only_fields(dictionary, fields): in 'fields'. If there are no event fields specified then all fields are included. - The entries may include '.' charaters to indicate sub-fields. + The entries may include '.' characters to indicate sub-fields. So ['content.body'] will include the 'body' field of the 'content' object. A literal '.' character in a field name may be escaped using a '\'. 
diff --git a/synapse/groups/attestations.py b/synapse/groups/attestations.py index a86b3debc5ce..41cf07cc881b 100644 --- a/synapse/groups/attestations.py +++ b/synapse/groups/attestations.py @@ -22,7 +22,7 @@ If a user leaves (or gets kicked out of) a group, either side can still use their attestation to "prove" their membership, until the attestation expires. Therefore attestations shouldn't be relied on to prove membership in important -cases, but can for less important situtations, e.g. showing a users membership +cases, but can for less important situations, e.g. showing a users membership of groups on their profile, showing flairs, etc. An attestation is a signed blob of json that looks like: diff --git a/synapse/groups/groups_server.py b/synapse/groups/groups_server.py index e5f85b472dd2..0d042cbfac83 100644 --- a/synapse/groups/groups_server.py +++ b/synapse/groups/groups_server.py @@ -113,7 +113,7 @@ async def get_group_summary(self, group_id, requester_user_id): entry = await self.room_list_handler.generate_room_entry( room_id, len(joined_users), with_alias=False, allow_private=True ) - entry = dict(entry) # so we don't change whats cached + entry = dict(entry) # so we don't change what's cached entry.pop("room_id", None) room_entry["profile"] = entry @@ -550,7 +550,7 @@ async def update_room_in_group( group_id, room_id, is_public=is_public ) else: - raise SynapseError(400, "Uknown config option") + raise SynapseError(400, "Unknown config option") return {} diff --git a/synapse/handlers/_base.py b/synapse/handlers/_base.py index 0206320e9692..bd8e71ae56a3 100644 --- a/synapse/handlers/_base.py +++ b/synapse/handlers/_base.py @@ -14,6 +14,7 @@ # limitations under the License. import logging +from typing import TYPE_CHECKING, Optional import synapse.state import synapse.storage @@ -22,6 +23,9 @@ from synapse.api.ratelimiting import Ratelimiter from synapse.types import UserID +if TYPE_CHECKING: + from synapse.app.homeserver import HomeServer + logger = logging.getLogger(__name__) @@ -30,11 +34,7 @@ class BaseHandler: Common base class for the event handlers. 
""" - def __init__(self, hs): - """ - Args: - hs (synapse.server.HomeServer): - """ + def __init__(self, hs: "HomeServer"): self.store = hs.get_datastore() # type: synapse.storage.DataStore self.auth = hs.get_auth() self.notifier = hs.get_notifier() @@ -56,7 +56,7 @@ def __init__(self, hs): clock=self.clock, rate_hz=self.hs.config.rc_admin_redaction.per_second, burst_count=self.hs.config.rc_admin_redaction.burst_count, - ) + ) # type: Optional[Ratelimiter] else: self.admin_redaction_ratelimiter = None @@ -127,15 +127,15 @@ async def maybe_kick_guest_users(self, event, context=None): if guest_access != "can_join": if context: current_state_ids = await context.get_current_state_ids() - current_state = await self.store.get_events( + current_state_dict = await self.store.get_events( list(current_state_ids.values()) ) + current_state = list(current_state_dict.values()) else: - current_state = await self.state_handler.get_current_state( + current_state_map = await self.state_handler.get_current_state( event.room_id ) - - current_state = list(current_state.values()) + current_state = list(current_state_map.values()) logger.info("maybe_kick_guest_users %r", current_state) await self.kick_guest_users(current_state) diff --git a/synapse/handlers/account_validity.py b/synapse/handlers/account_validity.py index f33044e97aa6..fd4f762f333d 100644 --- a/synapse/handlers/account_validity.py +++ b/synapse/handlers/account_validity.py @@ -22,7 +22,7 @@ from synapse.api.errors import StoreError from synapse.logging.context import make_deferred_yieldable -from synapse.metrics.background_process_metrics import run_as_background_process +from synapse.metrics.background_process_metrics import wrap_as_background_process from synapse.types import UserID from synapse.util import stringutils @@ -63,16 +63,10 @@ def __init__(self, hs): self._raw_from = email.utils.parseaddr(self._from_string)[1] # Check the renewal emails to send and send them every 30min. - def send_emails(): - # run as a background process to make sure that the database transactions - # have a logcontext to report to - return run_as_background_process( - "send_renewals", self._send_renewal_emails - ) - if hs.config.run_background_tasks: - self.clock.looping_call(send_emails, 30 * 60 * 1000) + self.clock.looping_call(self._send_renewal_emails, 30 * 60 * 1000) + @wrap_as_background_process("send_renewals") async def _send_renewal_emails(self): """Gets the list of users whose account is expiring in the amount of time configured in the ``renew_at`` parameter from the ``account_validity`` diff --git a/synapse/handlers/admin.py b/synapse/handlers/admin.py index 1ce2091b4649..a7039445436d 100644 --- a/synapse/handlers/admin.py +++ b/synapse/handlers/admin.py @@ -88,7 +88,7 @@ async def export_user_data(self, user_id, writer): # We only try and fetch events for rooms the user has been in. If # they've been e.g. invited to a room without joining then we handle - # those seperately. + # those separately. rooms_user_has_been_in = await self.store.get_rooms_user_has_been_in(user_id) for index, room in enumerate(rooms): @@ -226,7 +226,7 @@ def write_invite(self, room_id: str, event: FrozenEvent, state: StateMap[dict]): """ def finished(self): - """Called when all data has succesfully been exported and written. + """Called when all data has successfully been exported and written. This functions return value is passed to the caller of `export_user_data`. 
diff --git a/synapse/handlers/appservice.py b/synapse/handlers/appservice.py index 07240d3a14ba..3ed29a2c16ed 100644 --- a/synapse/handlers/appservice.py +++ b/synapse/handlers/appservice.py @@ -14,7 +14,7 @@ # limitations under the License. import logging -from typing import Dict, List, Optional +from typing import Dict, List, Optional, Union from prometheus_client import Counter @@ -30,7 +30,10 @@ event_processing_loop_counter, event_processing_loop_room_count, ) -from synapse.metrics.background_process_metrics import run_as_background_process +from synapse.metrics.background_process_metrics import ( + run_as_background_process, + wrap_as_background_process, +) from synapse.types import Collection, JsonDict, RoomStreamToken, UserID from synapse.util.metrics import Measure @@ -53,7 +56,7 @@ def __init__(self, hs): self.current_max = 0 self.is_processing = False - async def notify_interested_services(self, max_token: RoomStreamToken): + def notify_interested_services(self, max_token: RoomStreamToken): """Notifies (pushes) all application services interested in this event. Pushing is done asynchronously, so this method won't block for any @@ -72,6 +75,12 @@ async def notify_interested_services(self, max_token: RoomStreamToken): if self.is_processing: return + # We only start a new background process if necessary rather than + # optimistically (to cut down on overhead). + self._notify_interested_services(max_token) + + @wrap_as_background_process("notify_interested_services") + async def _notify_interested_services(self, max_token: RoomStreamToken): with Measure(self.clock, "notify_interested_services"): self.is_processing = True try: @@ -166,8 +175,11 @@ async def handle_room_events(events): finally: self.is_processing = False - async def notify_interested_services_ephemeral( - self, stream_key: str, new_token: Optional[int], users: Collection[UserID] = [], + def notify_interested_services_ephemeral( + self, + stream_key: str, + new_token: Optional[int], + users: Collection[Union[str, UserID]] = [], ): """This is called by the notifier in the background when a ephemeral event handled by the homeserver. @@ -183,13 +195,34 @@ async def notify_interested_services_ephemeral( new_token: The latest stream token users: The user(s) involved with the event. """ + if not self.notify_appservices: + return + + if stream_key not in ("typing_key", "receipt_key", "presence_key"): + return + services = [ service for service in self.store.get_app_services() if service.supports_ephemeral ] - if not services or not self.notify_appservices: + if not services: return + + # We only start a new background process if necessary rather than + # optimistically (to cut down on overhead). 
+ self._notify_interested_services_ephemeral( + services, stream_key, new_token, users + ) + + @wrap_as_background_process("notify_interested_services_ephemeral") + async def _notify_interested_services_ephemeral( + self, + services: List[ApplicationService], + stream_key: str, + new_token: Optional[int], + users: Collection[Union[str, UserID]], + ): logger.info("Checking interested services for %s" % (stream_key)) with Measure(self.clock, "notify_interested_services_ephemeral"): for service in services: @@ -203,16 +236,16 @@ async def notify_interested_services_ephemeral( events = await self._handle_receipts(service) if events: self.scheduler.submit_ephemeral_events_for_as(service, events) - await self.store.set_type_stream_id_for_appservice( - service, "read_receipt", new_token - ) + await self.store.set_type_stream_id_for_appservice( + service, "read_receipt", new_token + ) elif stream_key == "presence_key": events = await self._handle_presence(service, users) if events: self.scheduler.submit_ephemeral_events_for_as(service, events) - await self.store.set_type_stream_id_for_appservice( - service, "presence", new_token - ) + await self.store.set_type_stream_id_for_appservice( + service, "presence", new_token + ) async def _handle_typing(self, service: ApplicationService, new_token: int): typing_source = self.event_sources.sources["typing"] @@ -237,7 +270,7 @@ async def _handle_receipts(self, service: ApplicationService): return receipts async def _handle_presence( - self, service: ApplicationService, users: Collection[UserID] + self, service: ApplicationService, users: Collection[Union[str, UserID]] ): events = [] # type: List[JsonDict] presence_source = self.event_sources.sources["presence"] @@ -245,6 +278,9 @@ async def _handle_presence( service, "presence" ) for user in users: + if isinstance(user, str): + user = UserID.from_string(user) + interested = await service.is_interested_in_presence(user, self.store) if not interested: continue @@ -252,7 +288,7 @@ async def _handle_presence( user=user, service=service, from_key=from_key, ) time_now = self.clock.time_msec() - presence_events = [ + events.extend( { "type": "m.presence", "sender": event.user_id, @@ -261,8 +297,9 @@ async def _handle_presence( ), } for event in presence_events - ] - events = events + presence_events + ) + + return events async def query_user_exists(self, user_id): """Check if any application service knows this user_id exists. diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index 1d1ddc22454b..dd14ab69d733 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -470,9 +470,7 @@ async def check_ui_auth( # authentication flow. await self.store.set_ui_auth_clientdict(sid, clientdict) - user_agent = request.requestHeaders.getRawHeaders(b"User-Agent", default=[b""])[ - 0 - ].decode("ascii", "surrogateescape") + user_agent = request.get_user_agent("") await self.store.add_user_agent_ip_to_ui_auth_session( session.session_id, user_agent, clientip @@ -692,7 +690,7 @@ async def get_access_token_for_user_id( Creates a new access token for the user with the given user ID. The user is assumed to have been authenticated by some other - machanism (e.g. CAS), and the user_id converted to the canonical case. + mechanism (e.g. CAS), and the user_id converted to the canonical case. The device will be recorded in the table if it is not there already. @@ -1122,20 +1120,22 @@ async def validate_hash( Whether self.hash(password) == stored_hash. 
""" - def _do_validate_hash(): + def _do_validate_hash(checked_hash: bytes): # Normalise the Unicode in the password pw = unicodedata.normalize("NFKC", password) return bcrypt.checkpw( pw.encode("utf8") + self.hs.config.password_pepper.encode("utf8"), - stored_hash, + checked_hash, ) if stored_hash: if not isinstance(stored_hash, bytes): stored_hash = stored_hash.encode("ascii") - return await defer_to_thread(self.hs.get_reactor(), _do_validate_hash) + return await defer_to_thread( + self.hs.get_reactor(), _do_validate_hash, stored_hash + ) else: return False diff --git a/synapse/handlers/cas_handler.py b/synapse/handlers/cas_handler.py index a4cc4b9a5a18..048a3b3c0bba 100644 --- a/synapse/handlers/cas_handler.py +++ b/synapse/handlers/cas_handler.py @@ -212,9 +212,7 @@ async def handle_ticket( else: if not registered_user_id: # Pull out the user-agent and IP from the request. - user_agent = request.requestHeaders.getRawHeaders( - b"User-Agent", default=[b""] - )[0].decode("ascii", "surrogateescape") + user_agent = request.get_user_agent("") ip_address = self.hs.get_ip_from_request(request) registered_user_id = await self._registration_handler.register_user( diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index fde8f005318e..c386957706a3 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -112,7 +112,7 @@ class FederationHandler(BaseHandler): """Handles events that originated from federation. Responsible for: a) handling received Pdus before handing them on as Events to the rest - of the homeserver (including auth and state conflict resoultion) + of the homeserver (including auth and state conflict resolutions) b) converting events that were produced by local clients that may need to be sent to remote homeservers. c) doing the necessary dances to invite remote users and join remote @@ -477,7 +477,7 @@ async def _get_missing_events_for_pdu(self, origin, pdu, prevs, min_depth): # ---- # # Update richvdh 2018/09/18: There are a number of problems with timing this - # request out agressively on the client side: + # request out aggressively on the client side: # # - it plays badly with the server-side rate-limiter, which starts tarpitting you # if you send too many requests at once, so you end up with the server carefully @@ -495,13 +495,13 @@ async def _get_missing_events_for_pdu(self, origin, pdu, prevs, min_depth): # we'll end up back here for the *next* PDU in the list, which exacerbates the # problem. # - # - the agressive 10s timeout was introduced to deal with incoming federation + # - the aggressive 10s timeout was introduced to deal with incoming federation # requests taking 8 hours to process. It's not entirely clear why that was going # on; certainly there were other issues causing traffic storms which are now # resolved, and I think in any case we may be more sensible about our locking # now. We're *certainly* more sensible about our logging. # - # All that said: Let's try increasing the timout to 60s and see what happens. + # All that said: Let's try increasing the timeout to 60s and see what happens. 
try: missing_events = await self.federation_client.get_missing_events( @@ -1120,7 +1120,7 @@ async def try_backfill(domains): logger.info(str(e)) continue except RequestSendFailed as e: - logger.info("Falied to get backfill from %s because %s", dom, e) + logger.info("Failed to get backfill from %s because %s", dom, e) continue except FederationDeniedError as e: logger.info(e) @@ -1545,7 +1545,7 @@ async def on_send_join_request(self, origin, pdu): # # The reasons we have the destination server rather than the origin # server send it are slightly mysterious: the origin server should have - # all the neccessary state once it gets the response to the send_join, + # all the necessary state once it gets the response to the send_join, # so it could send the event itself if it wanted to. It may be that # doing it this way reduces failure modes, or avoids certain attacks # where a new server selectively tells a subset of the federation that @@ -1649,7 +1649,7 @@ async def do_remotely_reject_invite( event.internal_metadata.outlier = True event.internal_metadata.out_of_band_membership = True - # Try the host that we succesfully called /make_leave/ on first for + # Try the host that we successfully called /make_leave/ on first for # the /send_leave/ request. host_list = list(target_hosts) try: diff --git a/synapse/handlers/groups_local.py b/synapse/handlers/groups_local.py index 9684e60fc8b6..abd8d2af4449 100644 --- a/synapse/handlers/groups_local.py +++ b/synapse/handlers/groups_local.py @@ -17,7 +17,7 @@ import logging from synapse.api.errors import HttpResponseException, RequestSendFailed, SynapseError -from synapse.types import get_domain_from_id +from synapse.types import GroupID, get_domain_from_id logger = logging.getLogger(__name__) @@ -28,6 +28,9 @@ def _create_rerouter(func_name): """ async def f(self, group_id, *args, **kwargs): + if not GroupID.is_valid(group_id): + raise SynapseError(400, "%s was not legal group ID" % (group_id,)) + if self.is_mine_id(group_id): return await getattr(self.groups_server_handler, func_name)( group_id, *args, **kwargs @@ -346,7 +349,7 @@ async def join_group(self, group_id, user_id, content): server_name=get_domain_from_id(group_id), ) - # TODO: Check that the group is public and we're being added publically + # TODO: Check that the group is public and we're being added publicly is_publicised = content.get("publicise", False) token = await self.store.register_user_group_membership( @@ -391,7 +394,7 @@ async def accept_invite(self, group_id, user_id, content): server_name=get_domain_from_id(group_id), ) - # TODO: Check that the group is public and we're being added publically + # TODO: Check that the group is public and we're being added publicly is_publicised = content.get("publicise", False) token = await self.store.register_user_group_membership( diff --git a/synapse/handlers/initial_sync.py b/synapse/handlers/initial_sync.py index 98075f48d2b3..cb11754bf878 100644 --- a/synapse/handlers/initial_sync.py +++ b/synapse/handlers/initial_sync.py @@ -293,6 +293,10 @@ async def room_initial_sync( user_id, room_id, pagin_config, membership, is_peeking ) elif membership == Membership.LEAVE: + # The member_event_id will always be available if membership is set + # to leave. 
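The comment just added documents an invariant that the `assert member_event_id` on the next line enforces. For readers wondering why the assert is needed at all: it narrows the variable's type for mypy. A minimal sketch of the pattern, with a hypothetical `leave_summary` helper (the patch uses the truthy form `assert member_event_id`, which mypy treats the same way here):

```python
from typing import Optional


def leave_summary(member_event_id: Optional[str]) -> str:
    # Without this assert, mypy flags the return line below, because
    # member_event_id could be None. The assert both documents the caller's
    # invariant and narrows Optional[str] to str.
    assert member_event_id is not None
    return "user parted; member event %s" % (member_event_id,)
```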
+ assert member_event_id + result = await self._room_initial_sync_parted( user_id, room_id, pagin_config, membership, member_event_id, is_peeking ) @@ -315,7 +319,7 @@ async def _room_initial_sync_parted( user_id: str, room_id: str, pagin_config: PaginationConfig, - membership: Membership, + membership: str, member_event_id: str, is_peeking: bool, ) -> JsonDict: @@ -367,7 +371,7 @@ async def _room_initial_sync_joined( user_id: str, room_id: str, pagin_config: PaginationConfig, - membership: Membership, + membership: str, is_peeking: bool, ) -> JsonDict: current_state = await self.state.get_current_state(room_id=room_id) diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index d6855c60ea72..f1b4d35182cc 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -657,7 +657,7 @@ async def deduplicate_state_event( context: The event context. Returns: - The previous verion of the event is returned, if it is found in the + The previous version of the event is returned, if it is found in the event context. Otherwise, None is returned. """ prev_state_ids = await context.get_prev_state_ids() diff --git a/synapse/handlers/oidc_handler.py b/synapse/handlers/oidc_handler.py index 05ac86e69714..331d4e7e963c 100644 --- a/synapse/handlers/oidc_handler.py +++ b/synapse/handlers/oidc_handler.py @@ -217,7 +217,7 @@ def _uses_userinfo(self) -> bool: This is based on the requested scopes: if the scopes include ``openid``, the provider should give use an ID token containing the - user informations. If not, we should fetch them using the + user information. If not, we should fetch them using the ``access_token`` with the ``userinfo_endpoint``. """ @@ -426,7 +426,7 @@ async def _exchange_code(self, code: str) -> Token: return resp async def _fetch_userinfo(self, token: Token) -> UserInfo: - """Fetch user informations from the ``userinfo_endpoint``. + """Fetch user information from the ``userinfo_endpoint``. Args: token: the token given by the ``token_endpoint``. @@ -695,9 +695,7 @@ async def handle_oidc_callback(self, request: SynapseRequest) -> None: return # Pull out the user-agent and IP from the request. - user_agent = request.requestHeaders.getRawHeaders(b"User-Agent", default=[b""])[ - 0 - ].decode("ascii", "surrogateescape") + user_agent = request.get_user_agent("") ip_address = self.hs.get_ip_from_request(request) # Call the mapper to register/login the user @@ -756,7 +754,7 @@ def _generate_oidc_session_token( Defaults to an hour. Returns: - A signed macaroon token with the session informations. + A signed macaroon token with the session information. """ macaroon = pymacaroons.Macaroon( location=self._server_name, identifier="key", key=self._macaroon_secret_key, diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index 1000ac95ff18..49a00eed9ce2 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -802,7 +802,7 @@ async def get_all_presence_updates( between the requested tokens due to the limit. The token returned can be used in a subsequent call to this - function to get further updatees. + function to get further updates. 
The updates are a list of 2-tuples of stream ID and the row data """ @@ -977,7 +977,7 @@ def should_notify(old_state, new_state): new_state.last_active_ts - old_state.last_active_ts > LAST_ACTIVE_GRANULARITY ): - # Only notify about last active bumps if we're not currently acive + # Only notify about last active bumps if we're not currently active if not new_state.currently_active: notify_reason_counter.labels("last_active_change_online").inc() return True diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py index b78493875522..3875e53c08da 100644 --- a/synapse/handlers/profile.py +++ b/synapse/handlers/profile.py @@ -12,9 +12,9 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - import logging import random +from typing import TYPE_CHECKING, Optional from synapse.api.errors import ( AuthError, @@ -24,11 +24,20 @@ StoreError, SynapseError, ) -from synapse.metrics.background_process_metrics import run_as_background_process -from synapse.types import UserID, create_requester, get_domain_from_id +from synapse.metrics.background_process_metrics import wrap_as_background_process +from synapse.types import ( + JsonDict, + Requester, + UserID, + create_requester, + get_domain_from_id, +) from ._base import BaseHandler +if TYPE_CHECKING: + from synapse.app.homeserver import HomeServer + logger = logging.getLogger(__name__) MAX_DISPLAYNAME_LEN = 256 @@ -45,7 +54,7 @@ class ProfileHandler(BaseHandler): PROFILE_UPDATE_MS = 60 * 1000 PROFILE_UPDATE_EVERY_MS = 24 * 60 * 60 * 1000 - def __init__(self, hs): + def __init__(self, hs: "HomeServer"): super().__init__(hs) self.federation = hs.get_federation_client() @@ -57,10 +66,10 @@ def __init__(self, hs): if hs.config.run_background_tasks: self.clock.looping_call( - self._start_update_remote_profile_cache, self.PROFILE_UPDATE_MS + self._update_remote_profile_cache, self.PROFILE_UPDATE_MS ) - async def get_profile(self, user_id): + async def get_profile(self, user_id: str) -> JsonDict: target_user = UserID.from_string(user_id) if self.hs.is_mine(target_user): @@ -89,11 +98,18 @@ async def get_profile(self, user_id): except RequestSendFailed as e: raise SynapseError(502, "Failed to fetch profile") from e except HttpResponseException as e: + if e.code < 500 and e.code != 404: + # Other codes are not allowed in c2s API + logger.info( + "Server replied with wrong response: %s %s", e.code, e.msg + ) + + raise SynapseError(502, "Failed to fetch profile") raise e.to_synapse_error() - async def get_profile_from_cache(self, user_id): + async def get_profile_from_cache(self, user_id: str) -> JsonDict: """Get the profile information from our local cache. If the user is - ours then the profile information will always be corect. Otherwise, + ours then the profile information will always be correct. Otherwise, it may be out of date/missing. 
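An aside on the `get_profile` error handling added above: non-404 client-error codes from the remote are not valid responses for the client-server API, so they are converted to a 502. As a standalone sketch (a hypothetical `map_remote_profile_error` helper; the real code raises `SynapseError(502, ...)` directly):

```python
def map_remote_profile_error(code: int) -> int:
    # 404 means "no such profile" and is passed through to the client;
    # any other non-5xx code is not a legal c2s response here, so it is
    # surfaced as a 502 rather than parroted verbatim.
    if code < 500 and code != 404:
        return 502
    return code
```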
""" target_user = UserID.from_string(user_id) @@ -115,7 +131,7 @@ async def get_profile_from_cache(self, user_id): profile = await self.store.get_from_remote_profile_cache(user_id) return profile or {} - async def get_displayname(self, target_user): + async def get_displayname(self, target_user: UserID) -> str: if self.hs.is_mine(target_user): try: displayname = await self.store.get_profile_displayname( @@ -143,15 +159,19 @@ async def get_displayname(self, target_user): return result["displayname"] async def set_displayname( - self, target_user, requester, new_displayname, by_admin=False - ): + self, + target_user: UserID, + requester: Requester, + new_displayname: str, + by_admin: bool = False, + ) -> None: """Set the displayname of a user Args: - target_user (UserID): the user whose displayname is to be changed. - requester (Requester): The user attempting to make this change. - new_displayname (str): The displayname to give this user. - by_admin (bool): Whether this change was made by an administrator. + target_user: the user whose displayname is to be changed. + requester: The user attempting to make this change. + new_displayname: The displayname to give this user. + by_admin: Whether this change was made by an administrator. """ if not self.hs.is_mine(target_user): raise SynapseError(400, "User is not hosted on this homeserver") @@ -176,8 +196,9 @@ async def set_displayname( 400, "Displayname is too long (max %i)" % (MAX_DISPLAYNAME_LEN,) ) + displayname_to_set = new_displayname # type: Optional[str] if new_displayname == "": - new_displayname = None + displayname_to_set = None # If the admin changes the display name of a user, the requesting user cannot send # the join event to update the displayname in the rooms. @@ -185,7 +206,9 @@ async def set_displayname( if by_admin: requester = create_requester(target_user) - await self.store.set_profile_displayname(target_user.localpart, new_displayname) + await self.store.set_profile_displayname( + target_user.localpart, displayname_to_set + ) if self.hs.config.user_directory_search_all_users: profile = await self.store.get_profileinfo(target_user.localpart) @@ -195,7 +218,7 @@ async def set_displayname( await self._update_join_states(requester, target_user) - async def get_avatar_url(self, target_user): + async def get_avatar_url(self, target_user: UserID) -> str: if self.hs.is_mine(target_user): try: avatar_url = await self.store.get_profile_avatar_url( @@ -222,15 +245,19 @@ async def get_avatar_url(self, target_user): return result["avatar_url"] async def set_avatar_url( - self, target_user, requester, new_avatar_url, by_admin=False + self, + target_user: UserID, + requester: Requester, + new_avatar_url: str, + by_admin: bool = False, ): """Set a new avatar URL for a user. Args: - target_user (UserID): the user whose avatar URL is to be changed. - requester (Requester): The user attempting to make this change. - new_avatar_url (str): The avatar URL to give this user. - by_admin (bool): Whether this change was made by an administrator. + target_user: the user whose avatar URL is to be changed. + requester: The user attempting to make this change. + new_avatar_url: The avatar URL to give this user. + by_admin: Whether this change was made by an administrator. 
""" if not self.hs.is_mine(target_user): raise SynapseError(400, "User is not hosted on this homeserver") @@ -267,7 +294,7 @@ async def set_avatar_url( await self._update_join_states(requester, target_user) - async def on_profile_query(self, args): + async def on_profile_query(self, args: JsonDict) -> JsonDict: user = UserID.from_string(args["user_id"]) if not self.hs.is_mine(user): raise SynapseError(400, "User is not hosted on this homeserver") @@ -292,7 +319,9 @@ async def on_profile_query(self, args): return response - async def _update_join_states(self, requester, target_user): + async def _update_join_states( + self, requester: Requester, target_user: UserID + ) -> None: if not self.hs.is_mine(target_user): return @@ -323,15 +352,17 @@ async def _update_join_states(self, requester, target_user): "Failed to update join event for room %s - %s", room_id, str(e) ) - async def check_profile_query_allowed(self, target_user, requester=None): + async def check_profile_query_allowed( + self, target_user: UserID, requester: Optional[UserID] = None + ) -> None: """Checks whether a profile query is allowed. If the 'require_auth_for_profile_requests' config flag is set to True and a 'requester' is provided, the query is only allowed if the two users share a room. Args: - target_user (UserID): The owner of the queried profile. - requester (None|UserID): The user querying for the profile. + target_user: The owner of the queried profile. + requester: The user querying for the profile. Raises: SynapseError(403): The two users share no room, or ne user couldn't @@ -370,11 +401,7 @@ async def check_profile_query_allowed(self, target_user, requester=None): raise SynapseError(403, "Profile isn't available", Codes.FORBIDDEN) raise - def _start_update_remote_profile_cache(self): - return run_as_background_process( - "Update remote profile", self._update_remote_profile_cache - ) - + @wrap_as_background_process("Update remote profile") async def _update_remote_profile_cache(self): """Called periodically to check profiles of remote users we haven't checked in a while. diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index ec300d8877c6..c5b1f1f1e1f0 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -1268,7 +1268,7 @@ async def shutdown_room( ) # We now wait for the create room to come back in via replication so - # that we can assume that all the joins/invites have propogated before + # that we can assume that all the joins/invites have propagated before # we try and auto join below. await self._replication.wait_for_stream_position( self.hs.config.worker.events_shard_config.get_instance(new_room_id), diff --git a/synapse/handlers/saml_handler.py b/synapse/handlers/saml_handler.py index 285c481a9604..fd6c5e9ea873 100644 --- a/synapse/handlers/saml_handler.py +++ b/synapse/handlers/saml_handler.py @@ -216,9 +216,7 @@ async def handle_saml_response(self, request: SynapseRequest) -> None: return # Pull out the user-agent and IP from the request. 
- user_agent = request.requestHeaders.getRawHeaders(b"User-Agent", default=[b""])[ - 0 - ].decode("ascii", "surrogateescape") + user_agent = request.get_user_agent("") ip_address = self.hs.get_ip_from_request(request) # Call the mapper to register/login the user diff --git a/synapse/handlers/search.py b/synapse/handlers/search.py index e9402e6e2efc..66f1bbcfc42b 100644 --- a/synapse/handlers/search.py +++ b/synapse/handlers/search.py @@ -139,7 +139,7 @@ async def search(self, user, content, batch=None): # Filter to apply to results filter_dict = room_cat.get("filter", {}) - # What to order results by (impacts whether pagination can be doen) + # What to order results by (impacts whether pagination can be done) order_by = room_cat.get("order_by", "rank") # Return the current state of the rooms? diff --git a/synapse/handlers/state_deltas.py b/synapse/handlers/state_deltas.py index 7a4ae0727a7f..fb4f70e8e23d 100644 --- a/synapse/handlers/state_deltas.py +++ b/synapse/handlers/state_deltas.py @@ -32,7 +32,7 @@ async def _get_key_change(self, prev_event_id, event_id, key_name, public_value) Returns: None if the field in the events either both match `public_value` or if neither do, i.e. there has been no change. - True if it didnt match `public_value` but now does + True if it didn't match `public_value` but now does False if it did match `public_value` but now doesn't """ prev_event = None diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index b527724bc492..32e53c2d2566 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -754,7 +754,7 @@ async def compute_state_delta( """ # TODO(mjark) Check if the state events were received by the server # after the previous sync, since we need to include those state - # updates even if they occured logically before the previous event. + # updates even if they occurred logically before the previous event. # TODO(mjark) Check for new redactions in the state events. with Measure(self.clock, "compute_state_delta"): @@ -1882,7 +1882,7 @@ async def _generate_room_entry( # members (as the client otherwise doesn't have enough info to form # the name itself). if sync_config.filter_collection.lazy_load_members() and ( - # we recalulate the summary: + # we recalculate the summary: # if there are membership changes in the timeline, or # if membership has changed during a gappy sync, or # if this is an initial sync. diff --git a/synapse/handlers/typing.py b/synapse/handlers/typing.py index d3692842e3b6..8758066c746a 100644 --- a/synapse/handlers/typing.py +++ b/synapse/handlers/typing.py @@ -371,7 +371,7 @@ async def get_all_typing_updates( between the requested tokens due to the limit. The token returned can be used in a subsequent call to this - function to get further updatees. + function to get further updates. The updates are a list of 2-tuples of stream ID and the row data """ diff --git a/synapse/handlers/user_directory.py b/synapse/handlers/user_directory.py index 79393c8829fc..afbebfc20058 100644 --- a/synapse/handlers/user_directory.py +++ b/synapse/handlers/user_directory.py @@ -31,7 +31,7 @@ class UserDirectoryHandler(StateDeltasHandler): N.B.: ASSUMES IT IS THE ONLY THING THAT MODIFIES THE USER DIRECTORY The user directory is filled with users who this server can see are joined to a - world_readable or publically joinable room. We keep a database table up to date + world_readable or publicly joinable room. 
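Backtracking to the `_get_key_change` docstring corrected above: its tri-state return value is easiest to see as a toy function over plain values (a hypothetical `key_change`; the real helper compares fields of two events):

```python
from typing import Optional


def key_change(prev_value: str, new_value: str, public_value: str) -> Optional[bool]:
    # None  -> no change relative to public_value
    # True  -> didn't match public_value before, but does now
    # False -> matched public_value before, but no longer does
    prev_matches = prev_value == public_value
    new_matches = new_value == public_value
    if prev_matches == new_matches:
        return None
    return new_matches
```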
We keep a database table up to date by streaming changes of the current state and recalculating whether users should be in the directory or not when necessary. """ diff --git a/synapse/http/federation/well_known_resolver.py b/synapse/http/federation/well_known_resolver.py index a306faa267c1..1cc666fbf67f 100644 --- a/synapse/http/federation/well_known_resolver.py +++ b/synapse/http/federation/well_known_resolver.py @@ -172,7 +172,7 @@ async def _fetch_well_known(self, server_name: bytes) -> Tuple[bytes, float]: had_valid_well_known = self._had_valid_well_known_cache.get(server_name, False) # We do this in two steps to differentiate between possibly transient - # errors (e.g. can't connect to host, 503 response) and more permenant + # errors (e.g. can't connect to host, 503 response) and more permanent # errors (such as getting a 404 response). response, body = await self._make_well_known_request( server_name, retry=had_valid_well_known diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py index c23a4d7c0cc0..04766ca965a1 100644 --- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py @@ -587,7 +587,7 @@ def build_auth_headers( """ Builds the Authorization headers for a federation request Args: - destination (bytes|None): The desination homeserver of the request. + destination (bytes|None): The destination homeserver of the request. May be None if the destination is an identity server, in which case destination_is must be non-None. method (bytes): The HTTP method of the request @@ -640,7 +640,7 @@ async def put_json( backoff_on_404=False, try_trailing_slash_on_400=False, ): - """ Sends the specifed json data using PUT + """ Sends the specified json data using PUT Args: destination (str): The remote server to send the HTTP request @@ -729,7 +729,7 @@ async def post_json( ignore_backoff=False, args={}, ): - """ Sends the specifed json data using POST + """ Sends the specified json data using POST Args: destination (str): The remote server to send the HTTP request diff --git a/synapse/http/request_metrics.py b/synapse/http/request_metrics.py index cd94e789e8ca..7c5defec826e 100644 --- a/synapse/http/request_metrics.py +++ b/synapse/http/request_metrics.py @@ -109,7 +109,7 @@ # The set of all in flight requests, set[RequestMetrics] _in_flight_requests = set() -# Protects the _in_flight_requests set from concurrent accesss +# Protects the _in_flight_requests set from concurrent access _in_flight_requests_lock = threading.Lock() diff --git a/synapse/http/server.py b/synapse/http/server.py index 00b98af3d40f..65dbd339ac3e 100644 --- a/synapse/http/server.py +++ b/synapse/http/server.py @@ -182,7 +182,7 @@ def register_paths(self, method, path_patterns, callback): """ Register a callback that gets fired if we receive a http request with the given method for a path that matches the given regex. - If the regex contains groups these gets passed to the calback via + If the regex contains groups these gets passed to the callback via an unpacked tuple. Args: @@ -241,7 +241,7 @@ async def _async_render_wrapper(self, request: SynapseRequest): async def _async_render(self, request: Request): """Delegates to `_async_render_` methods, or returns a 400 if - no appropriate method exists. Can be overriden in sub classes for + no appropriate method exists. Can be overridden in sub classes for different routing. """ # Treat HEAD requests as GET requests. 
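The `register_paths` docstring fixed above describes Synapse's regex-based routing, and the `_async_render` hunk just shown treats HEAD as GET. A toy version of both ideas together (a hypothetical `TinyRouter`; the real `JsonResource` passes the matched groups through as an unpacked tuple or dict and does rather more bookkeeping):

```python
import re
from typing import Callable, List, Pattern, Tuple


class TinyRouter:
    def __init__(self) -> None:
        self._routes = []  # type: List[Tuple[str, Pattern, Callable]]

    def register(self, method: str, pattern: str, callback: Callable) -> None:
        self._routes.append((method, re.compile(pattern), callback))

    def dispatch(self, method: str, path: str):
        # Treat HEAD requests as GET requests, as the hunk above does.
        if method == "HEAD":
            method = "GET"
        for route_method, route_pattern, callback in self._routes:
            match = route_pattern.match(path)
            if route_method == method and match:
                # Named groups in the regex become keyword arguments here.
                return callback(**match.groupdict())
        raise KeyError("No handler for %s %s" % (method, path))
```

For example, registering `r"/profile/(?P<user_id>[^/]+)$"` for GET routes `/profile/@alice:example.com` to `callback(user_id="@alice:example.com")`.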
@@ -386,7 +386,7 @@ def _get_handler_for_request( async def _async_render(self, request): callback, servlet_classname, group_dict = self._get_handler_for_request(request) - # Make sure we have an appopriate name for this handler in prometheus + # Make sure we have an appropriate name for this handler in prometheus # (rather than the default of JsonResource). request.request_metrics.name = servlet_classname diff --git a/synapse/http/servlet.py b/synapse/http/servlet.py index fd90ba7828f4..b361b7cbaf43 100644 --- a/synapse/http/servlet.py +++ b/synapse/http/servlet.py @@ -272,7 +272,6 @@ class attribute containing a pre-compiled regular expression. The automatic on_PUT on_POST on_DELETE - on_OPTIONS Automatically handles turning CodeMessageExceptions thrown by these methods into the appropriate HTTP response. @@ -283,7 +282,7 @@ def register(self, http_server): if hasattr(self, "PATTERNS"): patterns = self.PATTERNS - for method in ("GET", "PUT", "POST", "OPTIONS", "DELETE"): + for method in ("GET", "PUT", "POST", "DELETE"): if hasattr(self, "on_%s" % (method,)): servlet_classname = self.__class__.__name__ method_handler = getattr(self, "on_%s" % (method,)) diff --git a/synapse/http/site.py b/synapse/http/site.py index 6e79b4782801..ddb1770b093f 100644 --- a/synapse/http/site.py +++ b/synapse/http/site.py @@ -109,8 +109,14 @@ def get_method(self): method = self.method.decode("ascii") return method - def get_user_agent(self): - return self.requestHeaders.getRawHeaders(b"User-Agent", [None])[-1] + def get_user_agent(self, default: str) -> str: + """Return the last User-Agent header, or the given default. + """ + user_agent = self.requestHeaders.getRawHeaders(b"User-Agent", [None])[-1] + if user_agent is None: + return default + + return user_agent.decode("ascii", "replace") def render(self, resrc): # this is called once a Resource has been found to serve the request; in our @@ -161,7 +167,9 @@ async def handle_request(request): yield except Exception: # this should already have been caught, and sent back to the client as a 500. - logger.exception("Asynchronous messge handler raised an uncaught exception") + logger.exception( + "Asynchronous message handler raised an uncaught exception" + ) finally: # the request handler has finished its work and either sent the whole response # back, or handed over responsibility to a Producer. @@ -274,11 +282,7 @@ def _finished_processing(self): # with maximum recursion trying to log errors about # the charset problem. # c.f. https://github.com/matrix-org/synapse/issues/3471 - user_agent = self.get_user_agent() - if user_agent is not None: - user_agent = user_agent.decode("utf-8", "replace") - else: - user_agent = "-" + user_agent = self.get_user_agent("-") code = str(self.code) if not self.finished: diff --git a/synapse/logging/_remote.py b/synapse/logging/_remote.py new file mode 100644 index 000000000000..0caf32591623 --- /dev/null +++ b/synapse/logging/_remote.py @@ -0,0 +1,225 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +import sys +import traceback +from collections import deque +from ipaddress import IPv4Address, IPv6Address, ip_address +from math import floor +from typing import Callable, Optional + +import attr +from zope.interface import implementer + +from twisted.application.internet import ClientService +from twisted.internet.defer import Deferred +from twisted.internet.endpoints import ( + HostnameEndpoint, + TCP4ClientEndpoint, + TCP6ClientEndpoint, +) +from twisted.internet.interfaces import IPushProducer, ITransport +from twisted.internet.protocol import Factory, Protocol +from twisted.logger import ILogObserver, Logger, LogLevel + + +@attr.s +@implementer(IPushProducer) +class LogProducer: + """ + An IPushProducer that writes logs from its buffer to its transport when it + is resumed. + + Args: + buffer: Log buffer to read logs from. + transport: Transport to write to. + format_event: A callable to format the log entry to a string. + """ + + transport = attr.ib(type=ITransport) + format_event = attr.ib(type=Callable[[dict], str]) + _buffer = attr.ib(type=deque) + _paused = attr.ib(default=False, type=bool, init=False) + + def pauseProducing(self): + self._paused = True + + def stopProducing(self): + self._paused = True + self._buffer = deque() + + def resumeProducing(self): + self._paused = False + + while self._paused is False and (self._buffer and self.transport.connected): + try: + # Request the next event and format it. + event = self._buffer.popleft() + msg = self.format_event(event) + + # Send it as a new line over the transport. + self.transport.write(msg.encode("utf8")) + except Exception: + # Something has gone wrong writing to the transport -- log it + # and break out of the while. + traceback.print_exc(file=sys.__stderr__) + break + + +@attr.s +@implementer(ILogObserver) +class TCPLogObserver: + """ + An IObserver that writes JSON logs to a TCP target. + + Args: + hs (HomeServer): The homeserver that is being logged for. + host: The host of the logging target. + port: The logging target's port. + format_event: A callable to format the log entry to a string. + maximum_buffer: The maximum buffer size. + """ + + hs = attr.ib() + host = attr.ib(type=str) + port = attr.ib(type=int) + format_event = attr.ib(type=Callable[[dict], str]) + maximum_buffer = attr.ib(type=int) + _buffer = attr.ib(default=attr.Factory(deque), type=deque) + _connection_waiter = attr.ib(default=None, type=Optional[Deferred]) + _logger = attr.ib(default=attr.Factory(Logger)) + _producer = attr.ib(default=None, type=Optional[LogProducer]) + + def start(self) -> None: + + # Connect without DNS lookups if it's a direct IP. + try: + ip = ip_address(self.host) + if isinstance(ip, IPv4Address): + endpoint = TCP4ClientEndpoint( + self.hs.get_reactor(), self.host, self.port + ) + elif isinstance(ip, IPv6Address): + endpoint = TCP6ClientEndpoint( + self.hs.get_reactor(), self.host, self.port + ) + else: + raise ValueError("Unknown IP address provided: %s" % (self.host,)) + except ValueError: + endpoint = HostnameEndpoint(self.hs.get_reactor(), self.host, self.port) + + factory = Factory.forProtocol(Protocol) + self._service = ClientService(endpoint, factory, clock=self.hs.get_reactor()) + self._service.startService() + self._connect() + + def stop(self): + self._service.stopService() + + def _connect(self) -> None: + """ + Triggers an attempt to connect then write to the remote if not already writing. 
+ """ + if self._connection_waiter: + return + + self._connection_waiter = self._service.whenConnected(failAfterFailures=1) + + @self._connection_waiter.addErrback + def fail(r): + r.printTraceback(file=sys.__stderr__) + self._connection_waiter = None + self._connect() + + @self._connection_waiter.addCallback + def writer(r): + # We have a connection. If we already have a producer, and its + # transport is the same, just trigger a resumeProducing. + if self._producer and r.transport is self._producer.transport: + self._producer.resumeProducing() + self._connection_waiter = None + return + + # If the producer is still producing, stop it. + if self._producer: + self._producer.stopProducing() + + # Make a new producer and start it. + self._producer = LogProducer( + buffer=self._buffer, + transport=r.transport, + format_event=self.format_event, + ) + r.transport.registerProducer(self._producer, True) + self._producer.resumeProducing() + self._connection_waiter = None + + def _handle_pressure(self) -> None: + """ + Handle backpressure by shedding events. + + The buffer will, in this order, until the buffer is below the maximum: + - Shed DEBUG events + - Shed INFO events + - Shed the middle 50% of the events. + """ + if len(self._buffer) <= self.maximum_buffer: + return + + # Strip out DEBUGs + self._buffer = deque( + filter(lambda event: event["log_level"] != LogLevel.debug, self._buffer) + ) + + if len(self._buffer) <= self.maximum_buffer: + return + + # Strip out INFOs + self._buffer = deque( + filter(lambda event: event["log_level"] != LogLevel.info, self._buffer) + ) + + if len(self._buffer) <= self.maximum_buffer: + return + + # Cut the middle entries out + buffer_split = floor(self.maximum_buffer / 2) + + old_buffer = self._buffer + self._buffer = deque() + + for i in range(buffer_split): + self._buffer.append(old_buffer.popleft()) + + end_buffer = [] + for i in range(buffer_split): + end_buffer.append(old_buffer.pop()) + + self._buffer.extend(reversed(end_buffer)) + + def __call__(self, event: dict) -> None: + self._buffer.append(event) + + # Handle backpressure, if it exists. + try: + self._handle_pressure() + except Exception: + # If handling backpressure fails,clear the buffer and log the + # exception. + self._buffer.clear() + self._logger.failure("Failed clearing backpressure") + + # Try and write immediately. 
+ self._connect() diff --git a/synapse/logging/_terse_json.py b/synapse/logging/_terse_json.py index 1b8916cfa281..9b46956ca96b 100644 --- a/synapse/logging/_terse_json.py +++ b/synapse/logging/_terse_json.py @@ -18,26 +18,11 @@ """ import json -import sys -import traceback -from collections import deque -from ipaddress import IPv4Address, IPv6Address, ip_address -from math import floor -from typing import IO, Optional +from typing import IO -import attr -from zope.interface import implementer +from twisted.logger import FileLogObserver -from twisted.application.internet import ClientService -from twisted.internet.defer import Deferred -from twisted.internet.endpoints import ( - HostnameEndpoint, - TCP4ClientEndpoint, - TCP6ClientEndpoint, -) -from twisted.internet.interfaces import IPushProducer, ITransport -from twisted.internet.protocol import Factory, Protocol -from twisted.logger import FileLogObserver, ILogObserver, Logger +from synapse.logging._remote import TCPLogObserver _encoder = json.JSONEncoder(ensure_ascii=False, separators=(",", ":")) @@ -150,180 +135,22 @@ def formatEvent(_event: dict) -> str: return FileLogObserver(outFile, formatEvent) -@attr.s -@implementer(IPushProducer) -class LogProducer: +def TerseJSONToTCPLogObserver( + hs, host: str, port: int, metadata: dict, maximum_buffer: int +) -> FileLogObserver: """ - An IPushProducer that writes logs from its buffer to its transport when it - is resumed. - - Args: - buffer: Log buffer to read logs from. - transport: Transport to write to. - """ - - transport = attr.ib(type=ITransport) - _buffer = attr.ib(type=deque) - _paused = attr.ib(default=False, type=bool, init=False) - - def pauseProducing(self): - self._paused = True - - def stopProducing(self): - self._paused = True - self._buffer = deque() - - def resumeProducing(self): - self._paused = False - - while self._paused is False and (self._buffer and self.transport.connected): - try: - event = self._buffer.popleft() - self.transport.write(_encoder.encode(event).encode("utf8")) - self.transport.write(b"\n") - except Exception: - # Something has gone wrong writing to the transport -- log it - # and break out of the while. - traceback.print_exc(file=sys.__stderr__) - break - - -@attr.s -@implementer(ILogObserver) -class TerseJSONToTCPLogObserver: - """ - An IObserver that writes JSON logs to a TCP target. + A log observer that formats events to a flattened JSON representation. Args: hs (HomeServer): The homeserver that is being logged for. host: The host of the logging target. port: The logging target's port. - metadata: Metadata to be added to each log entry. + metadata: Metadata to be added to each log object. + maximum_buffer: The maximum buffer size. """ - hs = attr.ib() - host = attr.ib(type=str) - port = attr.ib(type=int) - metadata = attr.ib(type=dict) - maximum_buffer = attr.ib(type=int) - _buffer = attr.ib(default=attr.Factory(deque), type=deque) - _connection_waiter = attr.ib(default=None, type=Optional[Deferred]) - _logger = attr.ib(default=attr.Factory(Logger)) - _producer = attr.ib(default=None, type=Optional[LogProducer]) - - def start(self) -> None: - - # Connect without DNS lookups if it's a direct IP. 
- try: - ip = ip_address(self.host) - if isinstance(ip, IPv4Address): - endpoint = TCP4ClientEndpoint( - self.hs.get_reactor(), self.host, self.port - ) - elif isinstance(ip, IPv6Address): - endpoint = TCP6ClientEndpoint( - self.hs.get_reactor(), self.host, self.port - ) - except ValueError: - endpoint = HostnameEndpoint(self.hs.get_reactor(), self.host, self.port) - - factory = Factory.forProtocol(Protocol) - self._service = ClientService(endpoint, factory, clock=self.hs.get_reactor()) - self._service.startService() - self._connect() - - def stop(self): - self._service.stopService() - - def _connect(self) -> None: - """ - Triggers an attempt to connect then write to the remote if not already writing. - """ - if self._connection_waiter: - return - - self._connection_waiter = self._service.whenConnected(failAfterFailures=1) - - @self._connection_waiter.addErrback - def fail(r): - r.printTraceback(file=sys.__stderr__) - self._connection_waiter = None - self._connect() - - @self._connection_waiter.addCallback - def writer(r): - # We have a connection. If we already have a producer, and its - # transport is the same, just trigger a resumeProducing. - if self._producer and r.transport is self._producer.transport: - self._producer.resumeProducing() - self._connection_waiter = None - return - - # If the producer is still producing, stop it. - if self._producer: - self._producer.stopProducing() - - # Make a new producer and start it. - self._producer = LogProducer(buffer=self._buffer, transport=r.transport) - r.transport.registerProducer(self._producer, True) - self._producer.resumeProducing() - self._connection_waiter = None - - def _handle_pressure(self) -> None: - """ - Handle backpressure by shedding events. - - The buffer will, in this order, until the buffer is below the maximum: - - Shed DEBUG events - - Shed INFO events - - Shed the middle 50% of the events. - """ - if len(self._buffer) <= self.maximum_buffer: - return - - # Strip out DEBUGs - self._buffer = deque( - filter(lambda event: event["level"] != "DEBUG", self._buffer) - ) - - if len(self._buffer) <= self.maximum_buffer: - return - - # Strip out INFOs - self._buffer = deque( - filter(lambda event: event["level"] != "INFO", self._buffer) - ) - - if len(self._buffer) <= self.maximum_buffer: - return - - # Cut the middle entries out - buffer_split = floor(self.maximum_buffer / 2) - - old_buffer = self._buffer - self._buffer = deque() - - for i in range(buffer_split): - self._buffer.append(old_buffer.popleft()) - - end_buffer = [] - for i in range(buffer_split): - end_buffer.append(old_buffer.pop()) - - self._buffer.extend(reversed(end_buffer)) - - def __call__(self, event: dict) -> None: - flattened = flatten_event(event, self.metadata, include_time=True) - self._buffer.append(flattened) - - # Handle backpressure, if it exists. - try: - self._handle_pressure() - except Exception: - # If handling backpressure fails,clear the buffer and log the - # exception. - self._buffer.clear() - self._logger.failure("Failed clearing backpressure") + def formatEvent(_event: dict) -> str: + flattened = flatten_event(_event, metadata, include_time=True) + return _encoder.encode(flattened) + "\n" - # Try and write immediately. 
- self._connect() + return TCPLogObserver(hs, host, port, formatEvent, maximum_buffer) diff --git a/synapse/logging/opentracing.py b/synapse/logging/opentracing.py index e58850faff86..ab586c318c03 100644 --- a/synapse/logging/opentracing.py +++ b/synapse/logging/opentracing.py @@ -317,7 +317,7 @@ def ensure_active_span_inner_2(*args, **kwargs): @contextlib.contextmanager -def _noop_context_manager(*args, **kwargs): +def noop_context_manager(*args, **kwargs): """Does exactly what it says on the tin""" yield @@ -413,7 +413,7 @@ def start_active_span( """ if opentracing is None: - return _noop_context_manager() + return noop_context_manager() return opentracing.tracer.start_active_span( operation_name, @@ -428,7 +428,7 @@ def start_active_span( def start_active_span_follows_from(operation_name, contexts): if opentracing is None: - return _noop_context_manager() + return noop_context_manager() references = [opentracing.follows_from(context) for context in contexts] scope = start_active_span(operation_name, references=references) @@ -459,7 +459,7 @@ def start_active_span_from_request( # Also, twisted uses byte arrays while opentracing expects strings. if opentracing is None: - return _noop_context_manager() + return noop_context_manager() header_dict = { k.decode(): v[0].decode() for k, v in request.requestHeaders.getAllRawHeaders() @@ -497,7 +497,7 @@ def start_active_span_from_edu( """ if opentracing is None: - return _noop_context_manager() + return noop_context_manager() carrier = json_decoder.decode(edu_content.get("context", "{}")).get( "opentracing", {} diff --git a/synapse/metrics/background_process_metrics.py b/synapse/metrics/background_process_metrics.py index 5b73463504be..658f6ecd72a3 100644 --- a/synapse/metrics/background_process_metrics.py +++ b/synapse/metrics/background_process_metrics.py @@ -24,6 +24,7 @@ from twisted.internet import defer from synapse.logging.context import LoggingContext, PreserveLoggingContext +from synapse.logging.opentracing import noop_context_manager, start_active_span if TYPE_CHECKING: import resource @@ -166,7 +167,7 @@ def update_metrics(self): ) -def run_as_background_process(desc: str, func, *args, **kwargs): +def run_as_background_process(desc: str, func, *args, bg_start_span=True, **kwargs): """Run the given function in its own logcontext, with resource metrics This should be used to wrap processes which are fired off to run in the @@ -180,6 +181,9 @@ def run_as_background_process(desc: str, func, *args, **kwargs): Args: desc: a description for this background process type func: a function, which may return a Deferred or a coroutine + bg_start_span: Whether to start an opentracing span. Defaults to True. + Should only be disabled for processes that will not log to or tag + a span. 
        args: positional args for func
        kwargs: keyword args for func

@@ -197,14 +201,17 @@ async def run():
         with BackgroundProcessLoggingContext(desc) as context:
             context.request = "%s-%i" % (desc, count)

             try:
-                result = func(*args, **kwargs)
+                ctx = noop_context_manager()
+                if bg_start_span:
+                    ctx = start_active_span(desc, tags={"request_id": context.request})
+                with ctx:
+                    result = func(*args, **kwargs)

-                if inspect.isawaitable(result):
-                    result = await result
+                    if inspect.isawaitable(result):
+                        result = await result

-                return result
+                    return result
             except Exception:
                 logger.exception(
                     "Background process '%s' threw an exception", desc,
@@ -265,7 +272,7 @@ def __exit__(self, type, value, traceback) -> None:

         super().__exit__(type, value, traceback)

-        # The background process has finished. We explictly remove and manually
+        # The background process has finished. We explicitly remove and manually
         # update the metrics here so that if nothing is scraping metrics the set
         # doesn't infinitely grow.
         with _bg_metrics_lock:
diff --git a/synapse/notifier.py b/synapse/notifier.py
index 2e993411b9ec..eb56b26f219c 100644
--- a/synapse/notifier.py
+++ b/synapse/notifier.py
@@ -40,7 +40,6 @@
 from synapse.logging.context import PreserveLoggingContext
 from synapse.logging.utils import log_function
 from synapse.metrics import LaterGauge
-from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.streams.config import PaginationConfig
 from synapse.types import (
     Collection,
@@ -310,44 +309,37 @@ def _on_updated_room_token(self, max_room_stream_token: RoomStreamToken):
         """
         # poke any interested application service.
-        run_as_background_process(
-            "_notify_app_services", self._notify_app_services, max_room_stream_token
-        )
-
-        run_as_background_process(
-            "_notify_pusher_pool", self._notify_pusher_pool, max_room_stream_token
-        )
+        self._notify_app_services(max_room_stream_token)
+        self._notify_pusher_pool(max_room_stream_token)

         if self.federation_sender:
             self.federation_sender.notify_new_events(max_room_stream_token)

-    async def _notify_app_services(self, max_room_stream_token: RoomStreamToken):
+    def _notify_app_services(self, max_room_stream_token: RoomStreamToken):
         try:
-            await self.appservice_handler.notify_interested_services(
-                max_room_stream_token
-            )
+            self.appservice_handler.notify_interested_services(max_room_stream_token)
         except Exception:
             logger.exception("Error notifying application services of event")

-    async def _notify_app_services_ephemeral(
+    def _notify_app_services_ephemeral(
         self,
         stream_key: str,
         new_token: Union[int, RoomStreamToken],
-        users: Collection[UserID] = [],
+        users: Collection[Union[str, UserID]] = [],
     ):
         try:
             stream_token = None
             if isinstance(new_token, int):
                 stream_token = new_token
-            await self.appservice_handler.notify_interested_services_ephemeral(
+            self.appservice_handler.notify_interested_services_ephemeral(
                 stream_key, stream_token, users
             )
         except Exception:
             logger.exception("Error notifying application services of event")

-    async def _notify_pusher_pool(self, max_room_stream_token: RoomStreamToken):
+    def _notify_pusher_pool(self, max_room_stream_token: RoomStreamToken):
         try:
-            await self._pusher_pool.on_new_notifications(max_room_stream_token)
+            self._pusher_pool.on_new_notifications(max_room_stream_token)
         except Exception:
             logger.exception("Error pusher pool of event")

@@ -384,16 +376,12 @@ def on_new_event(
             self.notify_replication()

             # Notify appservices
-            run_as_background_process(
-                "_notify_app_services_ephemeral",
self._notify_app_services_ephemeral, - stream_key, - new_token, - users, + self._notify_app_services_ephemeral( + stream_key, new_token, users, ) def on_new_replication_data(self) -> None: - """Used to inform replication listeners that something has happend + """Used to inform replication listeners that something has happened without waking up any of the normal user event streams""" self.notify_replication() diff --git a/synapse/push/baserules.py b/synapse/push/baserules.py index 8047873ff1d9..2858b61fb1d1 100644 --- a/synapse/push/baserules.py +++ b/synapse/push/baserules.py @@ -37,7 +37,7 @@ def list_with_base_rules(rawrules, use_new_defaults=False): modified_base_rules = {r["rule_id"]: r for r in rawrules if r["priority_class"] < 0} # Remove the modified base rules from the list, They'll be added back - # in the default postions in the list. + # in the default positions in the list. rawrules = [r for r in rawrules if r["priority_class"] >= 0] # shove the server default rules for each kind onto the end of each diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py index c440f2545c5e..d9b5478b5331 100644 --- a/synapse/push/bulk_push_rule_evaluator.py +++ b/synapse/push/bulk_push_rule_evaluator.py @@ -390,12 +390,12 @@ async def get_rules(self, event, context): continue # If a user has left a room we remove their push rule. If they - # joined then we readd it later in _update_rules_with_member_event_ids + # joined then we re-add it later in _update_rules_with_member_event_ids ret_rules_by_user.pop(user_id, None) missing_member_event_ids[user_id] = event_id if missing_member_event_ids: - # If we have some memebr events we haven't seen, look them up + # If we have some member events we haven't seen, look them up # and fetch push rules for them if appropriate. logger.debug("Found new member events %r", missing_member_event_ids) await self._update_rules_with_member_event_ids( @@ -496,6 +496,6 @@ class _Invalidation(namedtuple("_Invalidation", ("cache", "room_id"))): # dedupe when we add callbacks to lru cache nodes, otherwise the number # of callbacks would grow. def __call__(self): - rules = self.cache.get(self.room_id, None, update_metrics=False) + rules = self.cache.get_immediate(self.room_id, None, update_metrics=False) if rules: rules.invalidate_all() diff --git a/synapse/push/mailer.py b/synapse/push/mailer.py index 455a1acb46a8..38195c8eea8a 100644 --- a/synapse/push/mailer.py +++ b/synapse/push/mailer.py @@ -24,7 +24,7 @@ import bleach import jinja2 -from synapse.api.constants import EventTypes +from synapse.api.constants import EventTypes, Membership from synapse.api.errors import StoreError from synapse.config.emailconfig import EmailSubjectConfig from synapse.logging.context import make_deferred_yieldable @@ -317,9 +317,14 @@ async def send_email(self, email_address, subject, extra_template_vars): async def get_room_vars( self, room_id, user_id, notifs, notif_events, room_state_ids ): - my_member_event_id = room_state_ids[("m.room.member", user_id)] - my_member_event = await self.store.get_event(my_member_event_id) - is_invite = my_member_event.content["membership"] == "invite" + # Check if one of the notifs is an invite event for the user. 
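The loop that follows implements this check against real event objects; as a dict-based sketch (a hypothetical `has_invite_for` helper, standing in for the scan over `notifs` and `notif_events`):

```python
def has_invite_for(user_id: str, events: list) -> bool:
    # An invite for this user is an m.room.member event whose state_key is
    # the user and whose content has membership == "invite".
    for ev in events:
        if (
            ev.get("type") == "m.room.member"
            and ev.get("state_key") == user_id
            and ev.get("content", {}).get("membership") == "invite"
        ):
            return True
    return False
```

The hunk resumes with the real, event-object version of this loop.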
+ is_invite = False + for n in notifs: + ev = notif_events[n["event_id"]] + if ev.type == EventTypes.Member and ev.state_key == user_id: + if ev.content.get("membership") == Membership.INVITE: + is_invite = True + break room_name = await calculate_room_name(self.store, room_state_ids, user_id) @@ -387,8 +392,8 @@ async def get_notif_vars(self, notif, user_id, notif_event, room_state_ids): return ret async def get_message_vars(self, notif, event, room_state_ids): - if event.type != EventTypes.Message: - return + if event.type != EventTypes.Message and event.type != EventTypes.Encrypted: + return None sender_state_event_id = room_state_ids[("m.room.member", event.sender)] sender_state_event = await self.store.get_event(sender_state_event_id) @@ -399,10 +404,8 @@ async def get_message_vars(self, notif, event, room_state_ids): # sender_hash % the number of default images to choose from sender_hash = string_ordinal_total(event.sender) - msgtype = event.content.get("msgtype") - ret = { - "msgtype": msgtype, + "event_type": event.type, "is_historical": event.event_id != notif["event_id"], "id": event.event_id, "ts": event.origin_server_ts, @@ -411,6 +414,14 @@ async def get_message_vars(self, notif, event, room_state_ids): "sender_hash": sender_hash, } + # Encrypted messages don't have any additional useful information. + if event.type == EventTypes.Encrypted: + return ret + + msgtype = event.content.get("msgtype") + + ret["msgtype"] = msgtype + if msgtype == "m.text": self.add_text_message_vars(ret, event) elif msgtype == "m.image": @@ -455,16 +466,26 @@ async def make_summary_text( self.store, room_state_ids[room_id], user_id, fallback_to_members=False ) - my_member_event_id = room_state_ids[room_id][("m.room.member", user_id)] - my_member_event = await self.store.get_event(my_member_event_id) - if my_member_event.content["membership"] == "invite": - inviter_member_event_id = room_state_ids[room_id][ - ("m.room.member", my_member_event.sender) - ] - inviter_member_event = await self.store.get_event( - inviter_member_event_id + # See if one of the notifs is an invite event for the user + invite_event = None + for n in notifs_by_room[room_id]: + ev = notif_events[n["event_id"]] + if ev.type == EventTypes.Member and ev.state_key == user_id: + if ev.content.get("membership") == Membership.INVITE: + invite_event = ev + break + + if invite_event: + inviter_member_event_id = room_state_ids[room_id].get( + ("m.room.member", invite_event.sender) ) - inviter_name = name_from_member_event(inviter_member_event) + inviter_name = invite_event.sender + if inviter_member_event_id: + inviter_member_event = await self.store.get_event( + inviter_member_event_id, allow_none=True + ) + if inviter_member_event: + inviter_name = name_from_member_event(inviter_member_event) if room_name is None: return self.email_subjects.invite_from_person % { diff --git a/synapse/push/pusherpool.py b/synapse/push/pusherpool.py index 0080c68ce28e..f32596498396 100644 --- a/synapse/push/pusherpool.py +++ b/synapse/push/pusherpool.py @@ -19,7 +19,10 @@ from prometheus_client import Gauge -from synapse.metrics.background_process_metrics import run_as_background_process +from synapse.metrics.background_process_metrics import ( + run_as_background_process, + wrap_as_background_process, +) from synapse.push import PusherConfigException from synapse.push.emailpusher import EmailPusher from synapse.push.httppusher import HttpPusher @@ -187,7 +190,7 @@ async def remove_pushers_by_access_token(self, user_id, access_tokens): ) await 
self.remove_pusher(p["app_id"], p["pushkey"], p["user_name"]) - async def on_new_notifications(self, max_token: RoomStreamToken): + def on_new_notifications(self, max_token: RoomStreamToken): if not self.pushers: # nothing to do here. return @@ -201,6 +204,17 @@ async def on_new_notifications(self, max_token: RoomStreamToken): # Nothing to do return + # We only start a new background process if necessary rather than + # optimistically (to cut down on overhead). + self._on_new_notifications(max_token) + + @wrap_as_background_process("on_new_notifications") + async def _on_new_notifications(self, max_token: RoomStreamToken): + # We just use the minimum stream ordering and ignore the vector clock + # component. This is safe to do as long as we *always* ignore the vector + # clock components. + max_stream_id = max_token.stream + prev_stream_id = self._last_room_stream_id_seen self._last_room_stream_id_seen = max_stream_id diff --git a/synapse/replication/slave/storage/client_ips.py b/synapse/replication/slave/storage/client_ips.py index 4b0ea0cc01cd..0f5b7adef781 100644 --- a/synapse/replication/slave/storage/client_ips.py +++ b/synapse/replication/slave/storage/client_ips.py @@ -15,7 +15,7 @@ from synapse.storage.database import DatabasePool from synapse.storage.databases.main.client_ips import LAST_SEEN_GRANULARITY -from synapse.util.caches.deferred_cache import DeferredCache +from synapse.util.caches.lrucache import LruCache from ._base import BaseSlavedStore @@ -24,9 +24,9 @@ class SlavedClientIpStore(BaseSlavedStore): def __init__(self, database: DatabasePool, db_conn, hs): super().__init__(database, db_conn, hs) - self.client_ip_last_seen = DeferredCache( - name="client_ip_last_seen", keylen=4, max_entries=50000 - ) # type: DeferredCache[tuple, int] + self.client_ip_last_seen = LruCache( + cache_name="client_ip_last_seen", keylen=4, max_size=50000 + ) # type: LruCache[tuple, int] async def insert_client_ip(self, user_id, access_token, ip, user_agent, device_id): now = int(self._clock.time_msec()) @@ -41,7 +41,7 @@ async def insert_client_ip(self, user_id, access_token, ip, user_agent, device_i if last_seen is not None and (now - last_seen) < LAST_SEEN_GRANULARITY: return - self.client_ip_last_seen.prefill(key, now) + self.client_ip_last_seen.set(key, now) self.hs.get_tcp_replication().send_user_ip( user_id, access_token, ip, user_agent, device_id, now diff --git a/synapse/replication/tcp/redis.py b/synapse/replication/tcp/redis.py index de19705c1f41..bc6ba709a785 100644 --- a/synapse/replication/tcp/redis.py +++ b/synapse/replication/tcp/redis.py @@ -166,7 +166,9 @@ def send_command(self, cmd: Command): Args: cmd (Command) """ - run_as_background_process("send-cmd", self._async_send_command, cmd) + run_as_background_process( + "send-cmd", self._async_send_command, cmd, bg_start_span=False + ) async def _async_send_command(self, cmd: Command): """Encode a replication command and send it over our outbound connection""" diff --git a/synapse/res/templates/notif.html b/synapse/res/templates/notif.html index 1a6c70b5624d..6d76064d132f 100644 --- a/synapse/res/templates/notif.html +++ b/synapse/res/templates/notif.html @@ -1,41 +1,47 @@ -{% for message in notif.messages %} +{%- for message in notif.messages %} - {% if loop.index0 == 0 or notif.messages[loop.index0 - 1].sender_name != notif.messages[loop.index0].sender_name %} - {% if message.sender_avatar_url %} + {%- if loop.index0 == 0 or notif.messages[loop.index0 - 1].sender_name != notif.messages[loop.index0].sender_name %} + {%- if 
message.sender_avatar_url %} - {% else %} - {% if message.sender_hash % 3 == 0 %} + {%- else %} + {%- if message.sender_hash % 3 == 0 %} - {% elif message.sender_hash % 3 == 1 %} + {%- elif message.sender_hash % 3 == 1 %} - {% else %} + {%- else %} - {% endif %} - {% endif %} - {% endif %} + {%- endif %} + {%- endif %} + {%- endif %} - {% if loop.index0 == 0 or notif.messages[loop.index0 - 1].sender_name != notif.messages[loop.index0].sender_name %} -
{% if message.msgtype == "m.emote" %}*{% endif %} {{ message.sender_name }}
- {% endif %} + {%- if loop.index0 == 0 or notif.messages[loop.index0 - 1].sender_name != notif.messages[loop.index0].sender_name %} +
{%- if message.msgtype == "m.emote" %}*{%- endif %} {{ message.sender_name }}
+ {%- endif %}
- {% if message.msgtype == "m.text" %} - {{ message.body_text_html }} - {% elif message.msgtype == "m.emote" %} - {{ message.body_text_html }} - {% elif message.msgtype == "m.notice" %} - {{ message.body_text_html }} - {% elif message.msgtype == "m.image" %} - - {% elif message.msgtype == "m.file" %} - {{ message.body_text_plain }} - {% endif %} + {%- if message.event_type == "m.room.encrypted" %} + An encrypted message. + {%- elif message.event_type == "m.room.message" %} + {%- if message.msgtype == "m.text" %} + {{ message.body_text_html }} + {%- elif message.msgtype == "m.emote" %} + {{ message.body_text_html }} + {%- elif message.msgtype == "m.notice" %} + {{ message.body_text_html }} + {%- elif message.msgtype == "m.image" %} + + {%- elif message.msgtype == "m.file" %} + {{ message.body_text_plain }} + {%- else %} + A message with unrecognised content. + {%- endif %} + {%- endif %}
{{ message.ts|format_ts("%H:%M") }} -{% endfor %} +{%- endfor %} diff --git a/synapse/res/templates/notif.txt b/synapse/res/templates/notif.txt index a37bee98332c..1ee7da3c50ef 100644 --- a/synapse/res/templates/notif.txt +++ b/synapse/res/templates/notif.txt @@ -1,16 +1,22 @@ -{% for message in notif.messages %} -{% if message.msgtype == "m.emote" %}* {% endif %}{{ message.sender_name }} ({{ message.ts|format_ts("%H:%M") }}) -{% if message.msgtype == "m.text" %} +{%- for message in notif.messages %} +{%- if message.event_type == "m.room.encrypted" %} +An encrypted message. +{%- elif message.event_type == "m.room.message" %} +{%- if message.msgtype == "m.emote" %}* {%- endif %}{{ message.sender_name }} ({{ message.ts|format_ts("%H:%M") }}) +{%- if message.msgtype == "m.text" %} {{ message.body_text_plain }} -{% elif message.msgtype == "m.emote" %} +{%- elif message.msgtype == "m.emote" %} {{ message.body_text_plain }} -{% elif message.msgtype == "m.notice" %} +{%- elif message.msgtype == "m.notice" %} {{ message.body_text_plain }} -{% elif message.msgtype == "m.image" %} +{%- elif message.msgtype == "m.image" %} {{ message.body_text_plain }} -{% elif message.msgtype == "m.file" %} +{%- elif message.msgtype == "m.file" %} {{ message.body_text_plain }} -{% endif %} -{% endfor %} +{%- else %} +A message with unrecognised content. +{%- endif %} +{%- endif %} +{%- endfor %} View {{ room.title }} at {{ notif.link }} diff --git a/synapse/res/templates/notif_mail.html b/synapse/res/templates/notif_mail.html index a2dfeb9e9f78..27d41827907f 100644 --- a/synapse/res/templates/notif_mail.html +++ b/synapse/res/templates/notif_mail.html @@ -2,8 +2,8 @@ @@ -18,21 +18,21 @@
{{ summary_text }}
- {% if app_name == "Riot" %} + {%- if app_name == "Riot" %} [Riot] - {% elif app_name == "Vector" %} + {%- elif app_name == "Vector" %} [Vector] - {% elif app_name == "Element" %} + {%- elif app_name == "Element" %} [Element] - {% else %} + {%- else %} [matrix] - {% endif %} + {%- endif %} - {% for room in rooms %} - {% include 'room.html' with context %} - {% endfor %} + {%- for room in rooms %} + {%- include 'room.html' with context %} + {%- endfor %} diff --git a/synapse/res/templates/notif_mail.txt b/synapse/res/templates/notif_mail.txt index 24843042a540..df3c253979ca 100644 --- a/synapse/res/templates/notif_mail.txt +++ b/synapse/res/templates/notif_mail.txt @@ -2,9 +2,9 @@ Hi {{ user_display_name }}, {{ summary_text }} -{% for room in rooms %} -{% include 'room.txt' with context %} -{% endfor %} +{%- for room in rooms %} +{%- include 'room.txt' with context %} +{%- endfor %} You can disable these notifications at {{ unsubscribe_link }} diff --git a/synapse/res/templates/room.html b/synapse/res/templates/room.html index b8525fef888c..4fc6f6ac9b31 100644 --- a/synapse/res/templates/room.html +++ b/synapse/res/templates/room.html @@ -1,23 +1,23 @@ - {% if room.invite %} + {%- if room.invite %} - {% else %} - {% for notif in room.notifs %} - {% include 'notif.html' with context %} - {% endfor %} - {% endif %} + {%- else %} + {%- for notif in room.notifs %} + {%- include 'notif.html' with context %} + {%- endfor %} + {%- endif %}
- {% if room.avatar_url %} + {%- if room.avatar_url %} - {% else %} - {% if room.hash % 3 == 0 %} + {%- else %} + {%- if room.hash % 3 == 0 %} - {% elif room.hash % 3 == 1 %} + {%- elif room.hash % 3 == 1 %} - {% else %} + {%- else %} - {% endif %} - {% endif %} + {%- endif %} + {%- endif %} {{ room.title }}
@@ -25,9 +25,9 @@
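The template hunks above make two kinds of change: `notif.html`/`notif.txt` learn to describe `m.room.encrypted` events and unrecognised content, and every Jinja tag switches from `{% ... %}` to the whitespace-trimming `{%- ... %}` form. A minimal sketch of what the leading `-` does (plain Jinja2; `items` is an illustrative variable, not one from the templates):

```python
# Hedged sketch of Jinja2 whitespace control; not Synapse code.
from jinja2 import Template

plain = Template("{% for x in items %}\n{{ x }}\n{% endfor %}")
trimmed = Template("{%- for x in items %}\n{{ x }}\n{%- endfor %}")

print(repr(plain.render(items=["a", "b"])))    # '\na\n\nb\n' - blank lines pile up
print(repr(trimmed.render(items=["a", "b"])))  # '\na\nb'     - whitespace before each tag is eaten
```

For the plain-text notification emails in particular, the trimmed form stops every `{% if %}`/`{% for %}` control line from leaving a stray blank line behind in the rendered message.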
diff --git a/synapse/res/templates/room.txt b/synapse/res/templates/room.txt index 84648c710ece..df841e9e6f00 100644 --- a/synapse/res/templates/room.txt +++ b/synapse/res/templates/room.txt @@ -1,9 +1,9 @@ {{ room.title }} -{% if room.invite %} +{%- if room.invite %} You've been invited, join at {{ room.link }} -{% else %} - {% for notif in room.notifs %} - {% include 'notif.txt' with context %} - {% endfor %} -{% endif %} +{%- else %} + {%- for notif in room.notifs %} + {%- include 'notif.txt' with context %} + {%- endfor %} +{%- endif %} diff --git a/synapse/rest/admin/devices.py b/synapse/rest/admin/devices.py index a16386332205..ffd3aa38f768 100644 --- a/synapse/rest/admin/devices.py +++ b/synapse/rest/admin/devices.py @@ -119,7 +119,7 @@ async def on_GET(self, request, user_id): raise NotFoundError("Unknown user") devices = await self.device_handler.get_devices_by_user(target_user.to_string()) - return 200, {"devices": devices} + return 200, {"devices": devices, "total": len(devices)} class DeleteDevicesRestServlet(RestServlet): diff --git a/synapse/rest/client/v1/events.py b/synapse/rest/client/v1/events.py index 1ecb77aa2694..6de4078290ba 100644 --- a/synapse/rest/client/v1/events.py +++ b/synapse/rest/client/v1/events.py @@ -67,9 +67,6 @@ async def on_GET(self, request): return 200, chunk - def on_OPTIONS(self, request): - return 200, {} - class EventRestServlet(RestServlet): PATTERNS = client_patterns("/events/(?P<event_id>[^/]*)$", v1=True) diff --git a/synapse/rest/client/v1/login.py b/synapse/rest/client/v1/login.py index d7deb9300d71..94452fcbf581 100644 --- a/synapse/rest/client/v1/login.py +++ b/synapse/rest/client/v1/login.py @@ -110,10 +110,9 @@ def on_GET(self, request: SynapseRequest): ({"type": t} for t in self.auth_handler.get_supported_login_types()) ) - return 200, {"flows": flows} + flows.append({"type": LoginRestServlet.APPSERVICE_TYPE}) - def on_OPTIONS(self, request: SynapseRequest): - return 200, {} + return 200, {"flows": flows} async def on_POST(self, request: SynapseRequest): self._address_ratelimiter.ratelimit(request.getClientIP()) diff --git a/synapse/rest/client/v1/logout.py b/synapse/rest/client/v1/logout.py index f792b50cdc02..ad8cea49c6ed 100644 --- a/synapse/rest/client/v1/logout.py +++ b/synapse/rest/client/v1/logout.py @@ -30,9 +30,6 @@ def __init__(self, hs): self._auth_handler = hs.get_auth_handler() self._device_handler = hs.get_device_handler() - def on_OPTIONS(self, request): - return 200, {} - async def on_POST(self, request): requester = await self.auth.get_user_by_req(request, allow_expired=True) @@ -58,9 +55,6 @@ def __init__(self, hs): self._auth_handler = hs.get_auth_handler() self._device_handler = hs.get_device_handler() - def on_OPTIONS(self, request): - return 200, {} - async def on_POST(self, request): requester = await self.auth.get_user_by_req(request, allow_expired=True) user_id = requester.user.to_string() diff --git a/synapse/rest/client/v1/presence.py b/synapse/rest/client/v1/presence.py index 79d8e3057fec..23a529f8e3d3 100644 --- a/synapse/rest/client/v1/presence.py +++ b/synapse/rest/client/v1/presence.py @@ -86,9 +86,6 @@ async def on_PUT(self, request, user_id): return 200, {} - def on_OPTIONS(self, request): - return 200, {} - def register_servlets(hs, http_server): PresenceStatusRestServlet(hs).register(http_server) diff --git a/synapse/rest/client/v1/profile.py b/synapse/rest/client/v1/profile.py index e7fcd2b1ffea..85a66458c5bb 100644 --- a/synapse/rest/client/v1/profile.py +++ b/synapse/rest/client/v1/profile.py @@ -67,9 +67,6 @@
async def on_PUT(self, request, user_id): return 200, {} - def on_OPTIONS(self, request, user_id): - return 200, {} - class ProfileAvatarURLRestServlet(RestServlet): PATTERNS = client_patterns("/profile/(?P<user_id>[^/]*)/avatar_url", v1=True) @@ -118,9 +115,6 @@ async def on_PUT(self, request, user_id): return 200, {} - def on_OPTIONS(self, request, user_id): - return 200, {} - class ProfileRestServlet(RestServlet): PATTERNS = client_patterns("/profile/(?P<user_id>[^/]*)", v1=True) diff --git a/synapse/rest/client/v1/push_rule.py b/synapse/rest/client/v1/push_rule.py index f9eecb7cf5cd..241e535917a7 100644 --- a/synapse/rest/client/v1/push_rule.py +++ b/synapse/rest/client/v1/push_rule.py @@ -155,9 +155,6 @@ async def on_GET(self, request, path): else: raise UnrecognizedRequestError() - def on_OPTIONS(self, request, path): - return 200, {} - def notify_user(self, user_id): stream_id = self.store.get_max_push_rules_stream_id() self.notifier.on_new_event("push_rules_key", stream_id, users=[user_id]) diff --git a/synapse/rest/client/v1/pusher.py b/synapse/rest/client/v1/pusher.py index 28dabf1c7ab6..8fe83f321a8e 100644 --- a/synapse/rest/client/v1/pusher.py +++ b/synapse/rest/client/v1/pusher.py @@ -60,9 +60,6 @@ async def on_GET(self, request): return 200, {"pushers": filtered_pushers} - def on_OPTIONS(self, _): - return 200, {} - class PushersSetRestServlet(RestServlet): PATTERNS = client_patterns("/pushers/set$", v1=True) @@ -140,9 +137,6 @@ async def on_POST(self, request): return 200, {} - def on_OPTIONS(self, _): - return 200, {} - class PushersRemoveRestServlet(RestServlet): """ @@ -182,9 +176,6 @@ async def on_GET(self, request): ) return None - def on_OPTIONS(self, _): - return 200, {} - def register_servlets(hs, http_server): PushersRestServlet(hs).register(http_server) diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py index 00b439708222..25d3cc614806 100644 --- a/synapse/rest/client/v1/room.py +++ b/synapse/rest/client/v1/room.py @@ -72,20 +72,6 @@ def __init__(self, hs): def register(self, http_server): PATTERNS = "/createRoom" register_txn_path(self, PATTERNS, http_server) - # define CORS for all of /rooms in RoomCreateRestServlet for simplicity - http_server.register_paths( - "OPTIONS", - client_patterns("/rooms(?:/.*)?$", v1=True), - self.on_OPTIONS, - self.__class__.__name__, - ) - # define CORS for /createRoom[/txnid] - http_server.register_paths( - "OPTIONS", - client_patterns("/createRoom(?:/.*)?$", v1=True), - self.on_OPTIONS, - self.__class__.__name__, - ) def on_PUT(self, request, txn_id): set_tag("txn_id", txn_id) @@ -104,9 +90,6 @@ def get_room_config(self, request): user_supplied_config = parse_json_object_from_request(request) return user_supplied_config - def on_OPTIONS(self, request): - return 200, {} - # TODO: Needs unit testing for generic events class RoomStateEventRestServlet(TransactionRestServlet): diff --git a/synapse/rest/client/v1/voip.py b/synapse/rest/client/v1/voip.py index b8d491ca5c95..d07ca2c47cd2 100644 --- a/synapse/rest/client/v1/voip.py +++ b/synapse/rest/client/v1/voip.py @@ -69,9 +69,6 @@ async def on_GET(self, request): }, ) - def on_OPTIONS(self, request): - return 200, {} - def register_servlets(hs, http_server): VoipRestServlet(hs).register(http_server) diff --git a/synapse/rest/client/v2_alpha/account.py b/synapse/rest/client/v2_alpha/account.py index e857cff17616..51effc4d8e7c 100644 --- a/synapse/rest/client/v2_alpha/account.py +++ b/synapse/rest/client/v2_alpha/account.py @@ -268,9 +268,6 @@ async def on_POST(self,
request): return 200, {} - def on_OPTIONS(self, _): - return 200, {} - class DeactivateAccountRestServlet(RestServlet): PATTERNS = client_patterns("/account/deactivate$") diff --git a/synapse/rest/client/v2_alpha/auth.py b/synapse/rest/client/v2_alpha/auth.py index 5fbfae599101..fab077747f28 100644 --- a/synapse/rest/client/v2_alpha/auth.py +++ b/synapse/rest/client/v2_alpha/auth.py @@ -176,9 +176,6 @@ async def on_POST(self, request, stagetype): respond_with_html(request, 200, html) return None - def on_OPTIONS(self, _): - return 200, {} - def register_servlets(hs, http_server): AuthRestServlet(hs).register(http_server) diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py index 395b6a82a978..8f2c8cd991ec 100644 --- a/synapse/rest/client/v2_alpha/register.py +++ b/synapse/rest/client/v2_alpha/register.py @@ -642,9 +642,6 @@ async def on_POST(self, request): return 200, return_dict - def on_OPTIONS(self, _): - return 200, {} - async def _do_appservice_registration(self, username, as_token, body): user_id = await self.registration_handler.appservice_register( username, as_token diff --git a/synapse/server_notices/consent_server_notices.py b/synapse/server_notices/consent_server_notices.py index 3673e7f47e87..9137c4edb122 100644 --- a/synapse/server_notices/consent_server_notices.py +++ b/synapse/server_notices/consent_server_notices.py @@ -104,7 +104,7 @@ async def maybe_send_server_notice_to_user(self, user_id: str) -> None: def copy_with_str_subst(x: Any, substitutions: Any) -> Any: - """Deep-copy a structure, carrying out string substitions on any strings + """Deep-copy a structure, carrying out string substitutions on any strings Args: x (object): structure to be copied diff --git a/synapse/state/__init__.py b/synapse/state/__init__.py index 5b0900aa3cb0..1fa3b280b42b 100644 --- a/synapse/state/__init__.py +++ b/synapse/state/__init__.py @@ -547,7 +547,7 @@ async def resolve_state_groups( event_map: a dict from event_id to event, for any events that we happen to have in flight (eg, those currently being persisted). This will be - used as a starting point fof finding the state we need; any missing + used as a starting point for finding the state we need; any missing events will be requested via state_res_store. If None, all events will be fetched via state_res_store. diff --git a/synapse/state/v1.py b/synapse/state/v1.py index a493279cbd2e..85edae053dfe 100644 --- a/synapse/state/v1.py +++ b/synapse/state/v1.py @@ -56,7 +56,7 @@ async def resolve_events_with_store( event_map: a dict from event_id to event, for any events that we happen to have in flight (eg, those currently being persisted). This will be - used as a starting point fof finding the state we need; any missing + used as a starting point for finding the state we need; any missing events will be requested via state_map_factory. If None, all events will be fetched via state_map_factory. diff --git a/synapse/state/v2.py b/synapse/state/v2.py index edf94e7ad683..f57df0d72859 100644 --- a/synapse/state/v2.py +++ b/synapse/state/v2.py @@ -69,7 +69,7 @@ async def resolve_events_with_store( event_map: a dict from event_id to event, for any events that we happen to have in flight (eg, those currently being persisted). This will be - used as a starting point fof finding the state we need; any missing + used as a starting point for finding the state we need; any missing events will be requested via state_res_store. If None, all events will be fetched via state_res_store. 
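The REST servlet hunks above delete the same three-line `on_OPTIONS` stub from events, login, logout, presence, profile, push_rule, pusher, room, voip, account, auth, and register, along with the explicit OPTIONS path registrations in `room.py`. A hedged sketch of the pattern this implies, assuming (the deletions themselves do not show it) that OPTIONS requests are now answered once by a generic resource rather than per servlet; the class below is illustrative, not Synapse's implementation:

```python
# Hedged sketch: one catch-all handler standing in for the many removed
#     def on_OPTIONS(self, request):
#         return 200, {}
# stubs. Twisted dispatches render_<METHOD>, so a single resource suffices.
from twisted.web.resource import Resource


class CatchAllOptionsResource(Resource):
    """Illustrative: answer OPTIONS for any path with an empty 204."""

    isLeaf = True  # handle every sub-path here rather than traversing children

    def render_OPTIONS(self, request):
        request.setResponseCode(204)
        return b""
```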
diff --git a/synapse/static/client/login/js/login.js b/synapse/static/client/login/js/login.js index 3678670ec78d..744800ec77c4 100644 --- a/synapse/static/client/login/js/login.js +++ b/synapse/static/client/login/js/login.js @@ -182,7 +182,7 @@ matrixLogin.passwordLogin = function() { }; /* - * The onLogin function gets called after a succesful login. + * The onLogin function gets called after a successful login. * * It is expected that implementations override this to be notified when the * login is complete. The response to the login call is provided as the single diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index ab49d227de1c..2b196ded1bd0 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -76,14 +76,16 @@ def _attempt_to_invalidate_cache( """ try: - if key is None: - getattr(self, cache_name).invalidate_all() - else: - getattr(self, cache_name).invalidate(tuple(key)) + cache = getattr(self, cache_name) except AttributeError: # We probably haven't pulled in the cache in this worker, # which is fine. - pass + return + + if key is None: + cache.invalidate_all() + else: + cache.invalidate(tuple(key)) def db_to_json(db_content): diff --git a/synapse/storage/database.py b/synapse/storage/database.py index 763722d6bce6..0217e631085a 100644 --- a/synapse/storage/database.py +++ b/synapse/storage/database.py @@ -160,7 +160,7 @@ def __enter__(self) -> "Connection": self.conn.__enter__() return self - def __exit__(self, exc_type, exc_value, traceback) -> bool: + def __exit__(self, exc_type, exc_value, traceback) -> Optional[bool]: return self.conn.__exit__(exc_type, exc_value, traceback) # Proxy through any unknown lookups to the DB conn class. diff --git a/synapse/storage/databases/main/__init__.py b/synapse/storage/databases/main/__init__.py index 9b16f45f3eff..43660ec4fb5c 100644 --- a/synapse/storage/databases/main/__init__.py +++ b/synapse/storage/databases/main/__init__.py @@ -146,7 +146,6 @@ def __init__(self, database: DatabasePool, db_conn, hs): db_conn, "e2e_cross_signing_keys", "stream_id" ) - self._access_tokens_id_gen = IdGenerator(db_conn, "access_tokens", "id") self._event_reports_id_gen = IdGenerator(db_conn, "event_reports", "id") self._push_rule_id_gen = IdGenerator(db_conn, "push_rules", "id") self._push_rules_enable_id_gen = IdGenerator(db_conn, "push_rules_enable", "id") diff --git a/synapse/storage/databases/main/appservice.py b/synapse/storage/databases/main/appservice.py index 43bf0f649abf..637a938bacaa 100644 --- a/synapse/storage/databases/main/appservice.py +++ b/synapse/storage/databases/main/appservice.py @@ -369,17 +369,25 @@ def get_new_events_for_appservice_txn(txn): async def get_type_stream_id_for_appservice( self, service: ApplicationService, type: str ) -> int: + if type not in ("read_receipt", "presence"): + raise ValueError( + "Expected type to be a valid application stream id type, got %s" + % (type,) + ) + def get_type_stream_id_for_appservice_txn(txn): stream_id_type = "%s_stream_id" % type txn.execute( - "SELECT ? FROM application_services_state WHERE as_id=?", - (stream_id_type, service.id,), + # We do NOT want to escape `stream_id_type`. + "SELECT %s FROM application_services_state WHERE as_id=?" 
+ % stream_id_type, + (service.id,), ) - last_txn_id = txn.fetchone() - if last_txn_id is None or last_txn_id[0] is None: # no row exists + last_stream_id = txn.fetchone() + if last_stream_id is None or last_stream_id[0] is None: # no row exists return 0 else: - return int(last_txn_id[0]) + return int(last_stream_id[0]) return await self.db_pool.runInteraction( "get_type_stream_id_for_appservice", get_type_stream_id_for_appservice_txn @@ -388,11 +396,18 @@ def get_type_stream_id_for_appservice_txn(txn): async def set_type_stream_id_for_appservice( self, service: ApplicationService, type: str, pos: int ) -> None: + if type not in ("read_receipt", "presence"): + raise ValueError( + "Expected type to be a valid application stream id type, got %s" + % (type,) + ) + def set_type_stream_id_for_appservice_txn(txn): stream_id_type = "%s_stream_id" % type txn.execute( - "UPDATE ? SET device_list_stream_id = ? WHERE as_id=?", - (stream_id_type, pos, service.id), + "UPDATE application_services_state SET %s = ? WHERE as_id=?" + % stream_id_type, + (pos, service.id), ) await self.db_pool.runInteraction( diff --git a/synapse/storage/databases/main/client_ips.py b/synapse/storage/databases/main/client_ips.py index 9e66e6648a19..339bd691a4c1 100644 --- a/synapse/storage/databases/main/client_ips.py +++ b/synapse/storage/databases/main/client_ips.py @@ -19,7 +19,7 @@ from synapse.metrics.background_process_metrics import wrap_as_background_process from synapse.storage._base import SQLBaseStore from synapse.storage.database import DatabasePool, make_tuple_comparison_clause -from synapse.util.caches.deferred_cache import DeferredCache +from synapse.util.caches.lrucache import LruCache logger = logging.getLogger(__name__) @@ -410,8 +410,8 @@ def _prune_old_user_ips_txn(txn): class ClientIpStore(ClientIpWorkerStore): def __init__(self, database: DatabasePool, db_conn, hs): - self.client_ip_last_seen = DeferredCache( - name="client_ip_last_seen", keylen=4, max_entries=50000 + self.client_ip_last_seen = LruCache( + cache_name="client_ip_last_seen", keylen=4, max_size=50000 ) super().__init__(database, db_conn, hs) @@ -442,7 +442,7 @@ async def insert_client_ip( if last_seen is not None and (now - last_seen) < LAST_SEEN_GRANULARITY: return - self.client_ip_last_seen.prefill(key, now) + self.client_ip_last_seen.set(key, now) self._batch_row_update[key] = (user_agent, device_id, now) diff --git a/synapse/storage/databases/main/devices.py b/synapse/storage/databases/main/devices.py index e662a20d24a4..dfb4f87b8f3d 100644 --- a/synapse/storage/databases/main/devices.py +++ b/synapse/storage/databases/main/devices.py @@ -34,8 +34,8 @@ ) from synapse.types import Collection, JsonDict, get_verify_key_from_cross_signing_key from synapse.util import json_decoder, json_encoder -from synapse.util.caches.deferred_cache import DeferredCache from synapse.util.caches.descriptors import cached, cachedList +from synapse.util.caches.lrucache import LruCache from synapse.util.iterutils import batch_iter from synapse.util.stringutils import shortstr @@ -1005,8 +1005,8 @@ def __init__(self, database: DatabasePool, db_conn, hs): # Map of (user_id, device_id) -> bool. If there is an entry that implies # the device exists. 
- self.device_id_exists_cache = DeferredCache( - name="device_id_exists", keylen=2, max_entries=10000 + self.device_id_exists_cache = LruCache( + cache_name="device_id_exists", keylen=2, max_size=10000 ) async def store_device( @@ -1052,7 +1052,7 @@ async def store_device( ) if hidden: raise StoreError(400, "The device ID is in use", Codes.FORBIDDEN) - self.device_id_exists_cache.prefill(key, True) + self.device_id_exists_cache.set(key, True) return inserted except StoreError: raise diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py index ba3b1769b0ed..87808c148334 100644 --- a/synapse/storage/databases/main/events.py +++ b/synapse/storage/databases/main/events.py @@ -1051,9 +1051,7 @@ def _add_to_cache(self, txn, events_and_contexts): def prefill(): for cache_entry in to_prefill: - self.store._get_event_cache.prefill( - (cache_entry[0].event_id,), cache_entry - ) + self.store._get_event_cache.set((cache_entry[0].event_id,), cache_entry) txn.call_after(prefill) diff --git a/synapse/storage/databases/main/events_worker.py b/synapse/storage/databases/main/events_worker.py index 0ad9a19b3d6f..6e7f16f39c05 100644 --- a/synapse/storage/databases/main/events_worker.py +++ b/synapse/storage/databases/main/events_worker.py @@ -33,7 +33,10 @@ from synapse.events import EventBase, make_event_from_dict from synapse.events.utils import prune_event from synapse.logging.context import PreserveLoggingContext, current_context -from synapse.metrics.background_process_metrics import run_as_background_process +from synapse.metrics.background_process_metrics import ( + run_as_background_process, + wrap_as_background_process, +) from synapse.replication.slave.storage._slaved_id_tracker import SlavedIdTracker from synapse.replication.tcp.streams import BackfillStream from synapse.replication.tcp.streams.events import EventsStream @@ -42,8 +45,8 @@ from synapse.storage.engines import PostgresEngine from synapse.storage.util.id_generators import MultiWriterIdGenerator, StreamIdGenerator from synapse.types import Collection, get_domain_from_id -from synapse.util.caches.deferred_cache import DeferredCache from synapse.util.caches.descriptors import cached +from synapse.util.caches.lrucache import LruCache from synapse.util.iterutils import batch_iter from synapse.util.metrics import Measure @@ -140,17 +143,13 @@ def __init__(self, database: DatabasePool, db_conn, hs): if hs.config.run_background_tasks: # We periodically clean out old transaction ID mappings self._clock.looping_call( - run_as_background_process, - 5 * 60 * 1000, - "_cleanup_old_transaction_ids", - self._cleanup_old_transaction_ids, + self._cleanup_old_transaction_ids, 5 * 60 * 1000, ) - self._get_event_cache = DeferredCache( - "*getEvent*", + self._get_event_cache = LruCache( + cache_name="*getEvent*", keylen=3, - max_entries=hs.config.caches.event_cache_size, - apply_cache_factor_from_config=False, + max_size=hs.config.caches.event_cache_size, ) self._event_fetch_lock = threading.Condition() @@ -749,7 +748,7 @@ async def _get_events_from_db(self, event_ids, allow_rejected=False): event=original_ev, redacted_event=redacted_event ) - self._get_event_cache.prefill((event_id,), cache_entry) + self._get_event_cache.set((event_id,), cache_entry) result_map[event_id] = cache_entry return result_map @@ -1375,6 +1374,7 @@ async def get_already_persisted_events( return mapping + @wrap_as_background_process("_cleanup_old_transaction_ids") async def _cleanup_old_transaction_ids(self): """Cleans out transaction id 
mappings older than 24hrs. """ diff --git a/synapse/storage/databases/main/metrics.py b/synapse/storage/databases/main/metrics.py index 79b01d16f9d5..ab18cc4d79be 100644 --- a/synapse/storage/databases/main/metrics.py +++ b/synapse/storage/databases/main/metrics.py @@ -282,9 +282,10 @@ def _generate_user_daily_visits(txn): now = self._clock.time_msec() # A note on user_agent. Technically a given device can have multiple - # user agents, so we need to decide which one to pick. We could have handled this - # in number of ways, but given that we don't _that_ much have gone for MAX() - # For more details of the other options considered see + # user agents, so we need to decide which one to pick. We could have + # handled this in a number of ways, but given that we don't care + # _that_ much we have gone for MAX(). For more details of the other + # options considered see + # https://github.com/matrix-org/synapse/pull/8503#discussion_r502306111 sql = """ INSERT INTO user_daily_visits (user_id, device_id, timestamp, user_agent) @@ -299,7 +300,7 @@ def _generate_user_daily_visits(txn): WHERE last_seen > ? AND last_seen <= ? AND udv.timestamp IS NULL AND users.is_guest=0 AND users.appservice_id IS NULL - GROUP BY u.user_id, u.device_id, u.user_agent + GROUP BY u.user_id, u.device_id """ # This means that the day has rolled over but there could still diff --git a/synapse/storage/databases/main/profile.py b/synapse/storage/databases/main/profile.py index 1681caa1f031..a6d1eb908a5f 100644 --- a/synapse/storage/databases/main/profile.py +++ b/synapse/storage/databases/main/profile.py @@ -12,7 +12,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
-from typing import Any, Dict, Optional +from typing import Any, Dict, List, Optional from synapse.api.errors import StoreError from synapse.storage._base import SQLBaseStore @@ -72,7 +72,7 @@ async def create_profile(self, user_localpart: str) -> None: ) async def set_profile_displayname( - self, user_localpart: str, new_displayname: str + self, user_localpart: str, new_displayname: Optional[str] ) -> None: await self.db_pool.simple_update_one( table="profiles", @@ -144,7 +144,7 @@ async def is_subscribed_remote_profile_for_user(self, user_id): async def get_remote_profile_cache_entries_that_expire( self, last_checked: int - ) -> Dict[str, str]: + ) -> List[Dict[str, str]]: """Get all users who haven't been checked since `last_checked` """ diff --git a/synapse/storage/databases/main/pusher.py b/synapse/storage/databases/main/pusher.py index df8609b97bea..7997242d90b6 100644 --- a/synapse/storage/databases/main/pusher.py +++ b/synapse/storage/databases/main/pusher.py @@ -303,7 +303,7 @@ async def add_pusher( lock=False, ) - user_has_pusher = self.get_if_user_has_pusher.cache.get( + user_has_pusher = self.get_if_user_has_pusher.cache.get_immediate( (user_id,), None, update_metrics=False ) diff --git a/synapse/storage/databases/main/receipts.py b/synapse/storage/databases/main/receipts.py index 5cdf16521c3f..ca7917c9895b 100644 --- a/synapse/storage/databases/main/receipts.py +++ b/synapse/storage/databases/main/receipts.py @@ -25,7 +25,6 @@ from synapse.storage.util.id_generators import StreamIdGenerator from synapse.types import JsonDict from synapse.util import json_encoder -from synapse.util.async_helpers import ObservableDeferred from synapse.util.caches.descriptors import cached, cachedList from synapse.util.caches.stream_change_cache import StreamChangeCache @@ -413,18 +412,10 @@ def _invalidate_get_users_with_receipts_in_room( if receipt_type != "m.read": return - # Returns either an ObservableDeferred or the raw result - res = self.get_users_with_read_receipts_in_room.cache.get( + res = self.get_users_with_read_receipts_in_room.cache.get_immediate( room_id, None, update_metrics=False ) - # first handle the ObservableDeferred case - if isinstance(res, ObservableDeferred): - if res.has_called(): - res = res.get_result() - else: - res = None - if res and user_id in res: # We'd only be adding to the set, so no point invalidating if the # user is already there diff --git a/synapse/storage/databases/main/registration.py b/synapse/storage/databases/main/registration.py index 4c843b76798c..b0329e17ec6e 100644 --- a/synapse/storage/databases/main/registration.py +++ b/synapse/storage/databases/main/registration.py @@ -16,29 +16,33 @@ # limitations under the License. 
import logging import re -from typing import Any, Dict, List, Optional, Tuple +from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple from synapse.api.constants import UserTypes from synapse.api.errors import Codes, StoreError, SynapseError, ThreepidValidationError from synapse.metrics.background_process_metrics import wrap_as_background_process -from synapse.storage._base import SQLBaseStore from synapse.storage.database import DatabasePool -from synapse.storage.types import Cursor +from synapse.storage.databases.main.cache import CacheInvalidationWorkerStore +from synapse.storage.databases.main.stats import StatsStore +from synapse.storage.types import Connection, Cursor +from synapse.storage.util.id_generators import IdGenerator from synapse.storage.util.sequence import build_sequence_generator from synapse.types import UserID from synapse.util.caches.descriptors import cached +if TYPE_CHECKING: + from synapse.server import HomeServer + THIRTY_MINUTES_IN_MS = 30 * 60 * 1000 logger = logging.getLogger(__name__) -class RegistrationWorkerStore(SQLBaseStore): - def __init__(self, database: DatabasePool, db_conn, hs): +class RegistrationWorkerStore(CacheInvalidationWorkerStore): + def __init__(self, database: DatabasePool, db_conn: Connection, hs: "HomeServer"): super().__init__(database, db_conn, hs) self.config = hs.config - self.clock = hs.get_clock() # Note: we don't check this sequence for consistency as we'd have to # call `find_max_generated_user_id_localpart` each time, which is @@ -55,7 +59,7 @@ def __init__(self, database: DatabasePool, db_conn, hs): # Create a background job for culling expired 3PID validity tokens if hs.config.run_background_tasks: - self.clock.looping_call( + self._clock.looping_call( self.cull_expired_threepid_validation_tokens, THIRTY_MINUTES_IN_MS ) @@ -92,7 +96,7 @@ async def is_trial_user(self, user_id: str) -> bool: if not info: return False - now = self.clock.time_msec() + now = self._clock.time_msec() trial_duration_ms = self.config.mau_trial_days * 24 * 60 * 60 * 1000 is_trial = (now - info["creation_ts"] * 1000) < trial_duration_ms return is_trial @@ -257,7 +261,7 @@ def select_users_txn(txn, now_ms, renew_at): return await self.db_pool.runInteraction( "get_users_expiring_soon", select_users_txn, - self.clock.time_msec(), + self._clock.time_msec(), self.config.account_validity.renew_at, ) @@ -328,13 +332,17 @@ def set_server_admin_txn(txn): await self.db_pool.runInteraction("set_server_admin", set_server_admin_txn) def _query_for_auth(self, txn, token): - sql = ( - "SELECT users.name, users.is_guest, users.shadow_banned, access_tokens.id as token_id," - " access_tokens.device_id, access_tokens.valid_until_ms" - " FROM users" - " INNER JOIN access_tokens on users.name = access_tokens.user_id" - " WHERE token = ?" - ) + sql = """ + SELECT users.name, + users.is_guest, + users.shadow_banned, + access_tokens.id as token_id, + access_tokens.device_id, + access_tokens.valid_until_ms + FROM users + INNER JOIN access_tokens on users.name = access_tokens.user_id + WHERE token = ? 
+ """ txn.execute(sql, (token,)) rows = self.db_pool.cursor_to_dict(txn) @@ -803,7 +811,7 @@ def cull_expired_threepid_validation_tokens_txn(txn, ts): await self.db_pool.runInteraction( "cull_expired_threepid_validation_tokens", cull_expired_threepid_validation_tokens_txn, - self.clock.time_msec(), + self._clock.time_msec(), ) @wrap_as_background_process("account_validity_set_expiration_dates") @@ -890,10 +898,10 @@ async def del_user_pending_deactivation(self, user_id: str) -> None: class RegistrationBackgroundUpdateStore(RegistrationWorkerStore): - def __init__(self, database: DatabasePool, db_conn, hs): + def __init__(self, database: DatabasePool, db_conn: Connection, hs: "HomeServer"): super().__init__(database, db_conn, hs) - self.clock = hs.get_clock() + self._clock = hs.get_clock() self.config = hs.config self.db_pool.updates.register_background_index_update( @@ -1016,13 +1024,56 @@ def _bg_user_threepids_grandfather_txn(txn): return 1 + async def set_user_deactivated_status( + self, user_id: str, deactivated: bool + ) -> None: + """Set the `deactivated` property for the provided user to the provided value. + + Args: + user_id: The ID of the user to set the status for. + deactivated: The value to set for `deactivated`. + """ + + await self.db_pool.runInteraction( + "set_user_deactivated_status", + self.set_user_deactivated_status_txn, + user_id, + deactivated, + ) + + def set_user_deactivated_status_txn(self, txn, user_id: str, deactivated: bool): + self.db_pool.simple_update_one_txn( + txn=txn, + table="users", + keyvalues={"name": user_id}, + updatevalues={"deactivated": 1 if deactivated else 0}, + ) + self._invalidate_cache_and_stream( + txn, self.get_user_deactivated_status, (user_id,) + ) + txn.call_after(self.is_guest.invalidate, (user_id,)) + + @cached() + async def is_guest(self, user_id: str) -> bool: + res = await self.db_pool.simple_select_one_onecol( + table="users", + keyvalues={"name": user_id}, + retcol="is_guest", + allow_none=True, + desc="is_guest", + ) + + return res if res else False + -class RegistrationStore(RegistrationBackgroundUpdateStore): - def __init__(self, database: DatabasePool, db_conn, hs): +class RegistrationStore(StatsStore, RegistrationBackgroundUpdateStore): + def __init__(self, database: DatabasePool, db_conn: Connection, hs: "HomeServer"): super().__init__(database, db_conn, hs) self._ignore_unknown_session_error = hs.config.request_token_inhibit_3pid_errors + self._access_tokens_id_gen = IdGenerator(db_conn, "access_tokens", "id") + async def add_access_token_to_user( self, user_id: str, @@ -1138,19 +1189,19 @@ async def register_user( def _register_user( self, txn, - user_id, - password_hash, - was_guest, - make_guest, - appservice_id, - create_profile_with_displayname, - admin, - user_type, - shadow_banned, + user_id: str, + password_hash: Optional[str], + was_guest: bool, + make_guest: bool, + appservice_id: Optional[str], + create_profile_with_displayname: Optional[str], + admin: bool, + user_type: Optional[str], + shadow_banned: bool, ): user_id_obj = UserID.from_string(user_id) - now = int(self.clock.time()) + now = int(self._clock.time()) try: if was_guest: @@ -1374,18 +1425,6 @@ def f(txn): await self.db_pool.runInteraction("delete_access_token", f) - @cached() - async def is_guest(self, user_id: str) -> bool: - res = await self.db_pool.simple_select_one_onecol( - table="users", - keyvalues={"name": user_id}, - retcol="is_guest", - allow_none=True, - desc="is_guest", - ) - - return res if res else False - async def 
add_user_pending_deactivation(self, user_id: str) -> None: """ Adds a user to the table of users who need to be parted from all the rooms they're @@ -1479,7 +1518,7 @@ def validate_threepid_session_txn(txn): txn, table="threepid_validation_session", keyvalues={"session_id": session_id}, - updatevalues={"validated_at": self.clock.time_msec()}, + updatevalues={"validated_at": self._clock.time_msec()}, ) return next_link @@ -1547,35 +1586,6 @@ def start_or_continue_validation_session_txn(txn): start_or_continue_validation_session_txn, ) - async def set_user_deactivated_status( - self, user_id: str, deactivated: bool - ) -> None: - """Set the `deactivated` property for the provided user to the provided value. - - Args: - user_id: The ID of the user to set the status for. - deactivated: The value to set for `deactivated`. - """ - - await self.db_pool.runInteraction( - "set_user_deactivated_status", - self.set_user_deactivated_status_txn, - user_id, - deactivated, - ) - - def set_user_deactivated_status_txn(self, txn, user_id, deactivated): - self.db_pool.simple_update_one_txn( - txn=txn, - table="users", - keyvalues={"name": user_id}, - updatevalues={"deactivated": 1 if deactivated else 0}, - ) - self._invalidate_cache_and_stream( - txn, self.get_user_deactivated_status, (user_id,) - ) - txn.call_after(self.is_guest.invalidate, (user_id,)) - def find_max_generated_user_id_localpart(cur: Cursor) -> int: """ diff --git a/synapse/storage/databases/main/roommember.py b/synapse/storage/databases/main/roommember.py index 20fcdaa529ca..01d9dbb36f44 100644 --- a/synapse/storage/databases/main/roommember.py +++ b/synapse/storage/databases/main/roommember.py @@ -20,7 +20,10 @@ from synapse.events import EventBase from synapse.events.snapshot import EventContext from synapse.metrics import LaterGauge -from synapse.metrics.background_process_metrics import run_as_background_process +from synapse.metrics.background_process_metrics import ( + run_as_background_process, + wrap_as_background_process, +) from synapse.storage._base import SQLBaseStore, db_to_json, make_in_list_sql_clause from synapse.storage.database import DatabasePool from synapse.storage.databases.main.events_worker import EventsWorkerStore @@ -67,16 +70,10 @@ def __init__(self, database: DatabasePool, db_conn, hs): ): self._known_servers_count = 1 self.hs.get_clock().looping_call( - run_as_background_process, - 60 * 1000, - "_count_known_servers", - self._count_known_servers, + self._count_known_servers, 60 * 1000, ) self.hs.get_clock().call_later( - 1000, - run_as_background_process, - "_count_known_servers", - self._count_known_servers, + 1000, self._count_known_servers, ) LaterGauge( "synapse_federation_known_servers", @@ -85,6 +82,7 @@ def __init__(self, database: DatabasePool, db_conn, hs): lambda: self._known_servers_count, ) + @wrap_as_background_process("_count_known_servers") async def _count_known_servers(self): """ Count the servers that this server knows about. 
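As in the pusher and `events_worker` hunks earlier, the `looping_call(run_as_background_process, interval, name, func)` spelling collapses here to `looping_call(func, interval)` once `func` is decorated with `@wrap_as_background_process(name)`. A hedged, self-contained sketch of why the two are equivalent; the asyncio stand-ins below only mimic the wrapping and are not Synapse's implementation (which also tracks metrics and logcontexts):

```python
import asyncio
import functools


def run_as_background_process(desc, func, *args, **kwargs):
    # Toy stand-in: just schedule the coroutine under a name.
    return asyncio.create_task(func(*args, **kwargs), name=desc)


def wrap_as_background_process(desc):
    # After decoration, callers (including looping_call) can invoke the
    # coroutine function like a plain function; the wrapper starts the
    # background process itself.
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            return run_as_background_process(desc, func, *args, **kwargs)

        return wrapper

    return decorator


@wrap_as_background_process("count-known-servers")
async def count_known_servers():
    await asyncio.sleep(0)  # stand-in for the real counting query
    return 42


async def main():
    task = count_known_servers()  # plain call returns a scheduled Task
    print(await task)  # -> 42


asyncio.run(main())
```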
@@ -531,7 +529,7 @@ async def _get_joined_users_from_context( # If we do then we can reuse that result and simply update it with # any membership changes in `delta_ids` if context.prev_group and context.delta_ids: - prev_res = self._get_joined_users_from_context.cache.get( + prev_res = self._get_joined_users_from_context.cache.get_immediate( (room_id, context.prev_group), None ) if prev_res and isinstance(prev_res, dict): diff --git a/synapse/storage/databases/main/schema/delta/59/19as_device_stream.sql b/synapse/storage/databases/main/schema/delta/58/21as_device_stream.sql similarity index 80% rename from synapse/storage/databases/main/schema/delta/59/19as_device_stream.sql rename to synapse/storage/databases/main/schema/delta/58/21as_device_stream.sql index 20f5a95a24f8..7b84a207fd82 100644 --- a/synapse/storage/databases/main/schema/delta/59/19as_device_stream.sql +++ b/synapse/storage/databases/main/schema/delta/58/21as_device_stream.sql @@ -13,6 +13,5 @@ * limitations under the License. */ -ALTER TABLE application_services_state - ADD COLUMN read_receipt_stream_id INT, - ADD COLUMN presence_stream_id INT; \ No newline at end of file +ALTER TABLE application_services_state ADD COLUMN read_receipt_stream_id INT; +ALTER TABLE application_services_state ADD COLUMN presence_stream_id INT; \ No newline at end of file diff --git a/synapse/storage/databases/main/schema/delta/58/21drop_device_max_stream_id.sql b/synapse/storage/databases/main/schema/delta/58/21drop_device_max_stream_id.sql new file mode 100644 index 000000000000..01ea6eddcf49 --- /dev/null +++ b/synapse/storage/databases/main/schema/delta/58/21drop_device_max_stream_id.sql @@ -0,0 +1 @@ +DROP TABLE device_max_stream_id; diff --git a/synapse/storage/types.py b/synapse/storage/types.py index 970bb1b9da35..9cadcba18fc0 100644 --- a/synapse/storage/types.py +++ b/synapse/storage/types.py @@ -12,7 +12,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from typing import Any, Iterable, Iterator, List, Tuple +from typing import Any, Iterable, Iterator, List, Optional, Tuple from typing_extensions import Protocol @@ -65,5 +65,5 @@ def rollback(self, *args, **kwargs) -> None: def __enter__(self) -> "Connection": ... - def __exit__(self, exc_type, exc_value, traceback) -> bool: + def __exit__(self, exc_type, exc_value, traceback) -> Optional[bool]: ... diff --git a/synapse/util/caches/deferred_cache.py b/synapse/util/caches/deferred_cache.py index 4026e1f8fadc..601305487c55 100644 --- a/synapse/util/caches/deferred_cache.py +++ b/synapse/util/caches/deferred_cache.py @@ -17,11 +17,21 @@ import enum import threading -from typing import Callable, Generic, Iterable, MutableMapping, Optional, TypeVar, cast +from typing import ( + Callable, + Generic, + Iterable, + MutableMapping, + Optional, + TypeVar, + Union, + cast, +) from prometheus_client import Gauge from twisted.internet import defer +from twisted.python import failure from synapse.util.async_helpers import ObservableDeferred from synapse.util.caches.lrucache import LruCache @@ -33,7 +43,7 @@ ["name"], ) - +T = TypeVar("T") KT = TypeVar("KT") VT = TypeVar("VT") @@ -48,7 +58,7 @@ class DeferredCache(Generic[KT, VT]): """Wraps an LruCache, adding support for Deferred results. It expects that each entry added with set() will be a Deferred; likewise get() - may return an ObservableDeferred. + will return a Deferred. 
""" __slots__ = ( @@ -119,21 +129,27 @@ def check_thread(self): def get( self, key: KT, - default=_Sentinel.sentinel, callback: Optional[Callable[[], None]] = None, update_metrics: bool = True, - ): + ) -> defer.Deferred: """Looks the key up in the caches. + For symmetry with set(), this method does *not* follow the synapse logcontext + rules: the logcontext will not be cleared on return, and the Deferred will run + its callbacks in the sentinel context. In other words: wrap the result with + make_deferred_yieldable() before `await`ing it. + Args: - key(tuple) - default: What is returned if key is not in the caches. If not - specified then function throws KeyError instead - callback(fn): Gets called when the entry in the cache is invalidated + key: + callback: Gets called when the entry in the cache is invalidated update_metrics (bool): whether to update the cache hit rate metrics Returns: - Either an ObservableDeferred or the result itself + A Deferred which completes with the result. Note that this may later fail + if there is an ongoing set() operation which later completes with a failure. + + Raises: + KeyError if the key is not found in the cache """ callbacks = [callback] if callback else [] val = self._pending_deferred_cache.get(key, _Sentinel.sentinel) @@ -143,35 +159,83 @@ def get( m = self.cache.metrics assert m # we always have a name, so should always have metrics m.inc_hits() - return val.deferred + return val.deferred.observe() - val = self.cache.get( - key, default, callbacks=callbacks, update_metrics=update_metrics + val2 = self.cache.get( + key, _Sentinel.sentinel, callbacks=callbacks, update_metrics=update_metrics ) - if val is _Sentinel.sentinel: + if val2 is _Sentinel.sentinel: raise KeyError() else: - return val + return defer.succeed(val2) + + def get_immediate( + self, key: KT, default: T, update_metrics: bool = True + ) -> Union[VT, T]: + """If we have a *completed* cached value, return it.""" + return self.cache.get(key, default, update_metrics=update_metrics) def set( self, key: KT, value: defer.Deferred, callback: Optional[Callable[[], None]] = None, - ) -> ObservableDeferred: + ) -> defer.Deferred: + """Adds a new entry to the cache (or updates an existing one). + + The given `value` *must* be a Deferred. + + First any existing entry for the same key is invalidated. Then a new entry + is added to the cache for the given key. + + Until the `value` completes, calls to `get()` for the key will also result in an + incomplete Deferred, which will ultimately complete with the same result as + `value`. + + If `value` completes successfully, subsequent calls to `get()` will then return + a completed deferred with the same result. If it *fails*, the cache is + invalidated and subequent calls to `get()` will raise a KeyError. + + If another call to `set()` happens before `value` completes, then (a) any + invalidation callbacks registered in the interim will be called, (b) any + `get()`s in the interim will continue to complete with the result from the + *original* `value`, (c) any future calls to `get()` will complete with the + result from the *new* `value`. + + It is expected that `value` does *not* follow the synapse logcontext rules - ie, + if it is incomplete, it runs its callbacks in the sentinel context. 
+ + Args: + key: Key to be set + value: a deferred which will complete with a result to add to the cache + callback: An optional callback to be called when the entry is invalidated + """ if not isinstance(value, defer.Deferred): raise TypeError("not a Deferred") callbacks = [callback] if callback else [] self.check_thread() - observable = ObservableDeferred(value, consumeErrors=True) - observer = observable.observe() - entry = CacheEntry(deferred=observable, callbacks=callbacks) existing_entry = self._pending_deferred_cache.pop(key, None) if existing_entry: existing_entry.invalidate() + # XXX: why don't we invalidate the entry in `self.cache` yet? + + # we can save a whole load of effort if the deferred is ready. + if value.called: + result = value.result + if not isinstance(result, failure.Failure): + self.cache.set(key, result, callbacks) + return value + + # otherwise, we'll add an entry to the _pending_deferred_cache for now, + # and add callbacks to add it to the cache properly later. + + observable = ObservableDeferred(value, consumeErrors=True) + observer = observable.observe() + entry = CacheEntry(deferred=observable, callbacks=callbacks) + self._pending_deferred_cache[key] = entry def compare_and_pop(): @@ -215,7 +279,9 @@ def eb(_fail): # _pending_deferred_cache to the real cache. # observer.addCallbacks(cb, eb) - return observable + + # we return a new Deferred which will be called before any subsequent observers. + return observable.observe() def prefill(self, key: KT, value: VT, callback: Callable[[], None] = None): callbacks = [callback] if callback else [] diff --git a/synapse/util/caches/descriptors.py b/synapse/util/caches/descriptors.py index 1f438868047b..5d7fffee66ea 100644 --- a/synapse/util/caches/descriptors.py +++ b/synapse/util/caches/descriptors.py @@ -23,7 +23,6 @@ from synapse.logging.context import make_deferred_yieldable, preserve_fn from synapse.util import unwrapFirstError -from synapse.util.async_helpers import ObservableDeferred from synapse.util.caches.deferred_cache import DeferredCache logger = logging.getLogger(__name__) @@ -156,7 +155,7 @@ def __get__(self, obj, owner): keylen=self.num_args, tree=self.tree, iterable=self.iterable, - ) # type: DeferredCache[Tuple, Any] + ) # type: DeferredCache[CacheKey, Any] def get_cache_key_gen(args, kwargs): """Given some args/kwargs return a generator that resolves into @@ -202,32 +201,20 @@ def _wrapped(*args, **kwargs): cache_key = get_cache_key(args, kwargs) - # Add our own `cache_context` to argument list if the wrapped function - # has asked for one - if self.add_cache_context: - kwargs["cache_context"] = _CacheContext.get_instance(cache, cache_key) - try: - cached_result_d = cache.get(cache_key, callback=invalidate_callback) - - if isinstance(cached_result_d, ObservableDeferred): - observer = cached_result_d.observe() - else: - observer = defer.succeed(cached_result_d) - + ret = cache.get(cache_key, callback=invalidate_callback) except KeyError: - ret = defer.maybeDeferred(preserve_fn(self.orig), obj, *args, **kwargs) + # Add our own `cache_context` to argument list if the wrapped function + # has asked for one + if self.add_cache_context: + kwargs["cache_context"] = _CacheContext.get_instance( + cache, cache_key + ) - def onErr(f): - cache.invalidate(cache_key) - return f - - ret.addErrback(onErr) - - result_d = cache.set(cache_key, ret, callback=invalidate_callback) - observer = result_d.observe() + ret = defer.maybeDeferred(preserve_fn(self.orig), obj, *args, **kwargs) + ret = cache.set(cache_key, ret, 
callback=invalidate_callback) - return make_deferred_yieldable(observer) + return make_deferred_yieldable(ret) wrapped = cast(_CachedFunction, _wrapped) @@ -286,7 +273,7 @@ def __init__(self, orig, cached_method_name, list_name, num_args=None): def __get__(self, obj, objtype=None): cached_method = getattr(obj, self.cached_method_name) - cache = cached_method.cache + cache = cached_method.cache # type: DeferredCache[CacheKey, Any] num_args = cached_method.num_args @functools.wraps(self.orig) @@ -326,14 +313,11 @@ def arg_to_cache_key(arg): for arg in list_args: try: res = cache.get(arg_to_cache_key(arg), callback=invalidate_callback) - if not isinstance(res, ObservableDeferred): - results[arg] = res - elif not res.has_succeeded(): - res = res.observe() + if not res.called: res.addCallback(update_results_dict, arg) cached_defers.append(res) else: - results[arg] = res.get_result() + results[arg] = res.result except KeyError: missing.add(arg) diff --git a/synapse/util/caches/lrucache.py b/synapse/util/caches/lrucache.py index 4e95dd9bf391..60bb6ff642f2 100644 --- a/synapse/util/caches/lrucache.py +++ b/synapse/util/caches/lrucache.py @@ -124,6 +124,10 @@ def __init__( else: self.max_size = int(max_size) + # register_cache might call our "set_cache_factor" callback; there's nothing to + # do yet when we get resized. + self._on_resize = None # type: Optional[Callable[[],None]] + if cache_name is not None: metrics = register_cache( "lru_cache", @@ -332,11 +336,17 @@ def cache_contains(key: KT) -> bool: return key in cache self.sentinel = object() + + # make sure that we clear out any excess entries after we get resized. self._on_resize = evict + self.get = cache_get self.set = cache_set self.setdefault = cache_set_default self.pop = cache_pop + # `invalidate` is exposed for consistency with DeferredCache, so that it can be + # invalidated by the cache invalidation replication stream. + self.invalidate = cache_pop if cache_type is TreeCache: self.del_multi = cache_del_multi self.len = synchronized(cache_len) @@ -380,6 +390,7 @@ def set_cache_factor(self, factor: float) -> bool: new_size = int(self._original_max_size * factor) if new_size != self.max_size: self.max_size = new_size - self._on_resize() + if self._on_resize: + self._on_resize() return True return False diff --git a/synmark/__init__.py b/synmark/__init__.py index 53698bd5ab5a..09bc7e7927e9 100644 --- a/synmark/__init__.py +++ b/synmark/__init__.py @@ -15,7 +15,10 @@ import sys -from twisted.internet import epollreactor +try: + from twisted.internet.epollreactor import EPollReactor as Reactor +except ImportError: + from twisted.internet.pollreactor import PollReactor as Reactor from twisted.internet.main import installReactor from synapse.config.homeserver import HomeServerConfig @@ -41,7 +44,7 @@ async def make_homeserver(reactor, config=None): config_obj = HomeServerConfig() config_obj.parse_config_dict(config, "", "") - hs = await setup_test_homeserver( + hs = setup_test_homeserver( cleanup_tasks.append, config=config_obj, reactor=reactor, clock=clock ) stor = hs.get_datastore() @@ -63,7 +66,7 @@ def make_reactor(): Instantiate and install a Twisted reactor suitable for testing (i.e. not the default global one).
""" - reactor = epollreactor.EPollReactor() + reactor = Reactor() if "twisted.internet.reactor" in sys.modules: del sys.modules["twisted.internet.reactor"] diff --git a/tests/appservice/test_scheduler.py b/tests/appservice/test_scheduler.py index 2acb8b7603b0..97f8cad0ddd4 100644 --- a/tests/appservice/test_scheduler.py +++ b/tests/appservice/test_scheduler.py @@ -260,6 +260,31 @@ def do_send(x, y, z): self.txn_ctrl.send.assert_called_with(srv2, [srv_2_event2], []) self.assertEquals(3, self.txn_ctrl.send.call_count) + def test_send_large_txns(self): + srv_1_defer = defer.Deferred() + srv_2_defer = defer.Deferred() + send_return_list = [srv_1_defer, srv_2_defer] + + def do_send(x, y, z): + return make_deferred_yieldable(send_return_list.pop(0)) + + self.txn_ctrl.send = Mock(side_effect=do_send) + + service = Mock(id=4, name="service") + event_list = [Mock(name="event%i" % (i + 1)) for i in range(200)] + for event in event_list: + self.queuer.enqueue_event(service, event) + + # Expect the first event to be sent immediately. + self.txn_ctrl.send.assert_called_with(service, [event_list[0]], []) + srv_1_defer.callback(service) + # Then send the next 100 events + self.txn_ctrl.send.assert_called_with(service, event_list[1:101], []) + srv_2_defer.callback(service) + # Then the final 99 events + self.txn_ctrl.send.assert_called_with(service, event_list[101:], []) + self.assertEquals(3, self.txn_ctrl.send.call_count) + def test_send_single_ephemeral_no_queue(self): # Expect the event to be sent immediately. service = Mock(id=4, name="service") @@ -296,3 +321,19 @@ def test_send_single_ephemeral_with_queue(self): # Expect the queued events to be sent self.txn_ctrl.send.assert_called_with(service, [], event_list_2 + event_list_3) self.assertEquals(2, self.txn_ctrl.send.call_count) + + def test_send_large_txns_ephemeral(self): + d = defer.Deferred() + self.txn_ctrl.send = Mock( + side_effect=lambda x, y, z: make_deferred_yieldable(d) + ) + # Expect the event to be sent immediately. 
+ service = Mock(id=4, name="service") + first_chunk = [Mock(name="event%i" % (i + 1)) for i in range(100)] + second_chunk = [Mock(name="event%i" % (i + 101)) for i in range(50)] + event_list = first_chunk + second_chunk + self.queuer.enqueue_ephemeral(service, event_list) + self.txn_ctrl.send.assert_called_once_with(service, [], first_chunk) + d.callback(service) + self.txn_ctrl.send.assert_called_with(service, [], second_chunk) + self.assertEquals(2, self.txn_ctrl.send.call_count) diff --git a/tests/handlers/test_appservice.py b/tests/handlers/test_appservice.py index ee4f3da31c83..53763cd0f989 100644 --- a/tests/handlers/test_appservice.py +++ b/tests/handlers/test_appservice.py @@ -42,7 +42,6 @@ def setUp(self): hs.get_clock.return_value = MockClock() self.handler = ApplicationServicesHandler(hs) - @defer.inlineCallbacks def test_notify_interested_services(self): interested_service = self._mkservice(is_interested=True) services = [ @@ -62,14 +61,12 @@ def test_notify_interested_services(self): defer.succeed((0, [event])), defer.succeed((0, [])), ] - yield defer.ensureDeferred( - self.handler.notify_interested_services(RoomStreamToken(None, 0)) - ) + self.handler.notify_interested_services(RoomStreamToken(None, 0)) + self.mock_scheduler.submit_event_for_as.assert_called_once_with( interested_service, event ) - @defer.inlineCallbacks def test_query_user_exists_unknown_user(self): user_id = "@someone:anywhere" services = [self._mkservice(is_interested=True)] @@ -83,12 +80,11 @@ def test_query_user_exists_unknown_user(self): defer.succeed((0, [event])), defer.succeed((0, [])), ] - yield defer.ensureDeferred( - self.handler.notify_interested_services(RoomStreamToken(None, 0)) - ) + + self.handler.notify_interested_services(RoomStreamToken(None, 0)) + self.mock_as_api.query_user.assert_called_once_with(services[0], user_id) - @defer.inlineCallbacks def test_query_user_exists_known_user(self): user_id = "@someone:anywhere" services = [self._mkservice(is_interested=True)] @@ -102,9 +98,9 @@ def test_query_user_exists_known_user(self): defer.succeed((0, [event])), defer.succeed((0, [])), ] - yield defer.ensureDeferred( - self.handler.notify_interested_services(RoomStreamToken(None, 0)) - ) + + self.handler.notify_interested_services(RoomStreamToken(None, 0)) + self.assertFalse( self.mock_as_api.query_user.called, "query_user called when it shouldn't have been.", diff --git a/tests/handlers/test_oidc.py b/tests/handlers/test_oidc.py index b6f436c01678..0d517058490e 100644 --- a/tests/handlers/test_oidc.py +++ b/tests/handlers/test_oidc.py @@ -394,7 +394,14 @@ def test_callback(self): self.handler._map_userinfo_to_user = simple_async_mock(return_value=user_id) self.handler._auth_handler.complete_sso_login = simple_async_mock() request = Mock( - spec=["args", "getCookie", "addCookie", "requestHeaders", "getClientIP"] + spec=[ + "args", + "getCookie", + "addCookie", + "requestHeaders", + "getClientIP", + "get_user_agent", + ] ) code = "code" @@ -414,9 +421,8 @@ def test_callback(self): request.args[b"code"] = [code.encode("utf-8")] request.args[b"state"] = [state.encode("utf-8")] - request.requestHeaders = Mock(spec=["getRawHeaders"]) - request.requestHeaders.getRawHeaders.return_value = [user_agent.encode("ascii")] request.getClientIP.return_value = ip_address + request.get_user_agent.return_value = user_agent self.get_success(self.handler.handle_oidc_callback(request)) @@ -621,7 +627,14 @@ def test_extra_attributes(self): self.handler._map_userinfo_to_user = 
simple_async_mock(return_value=user_id) self.handler._auth_handler.complete_sso_login = simple_async_mock() request = Mock( - spec=["args", "getCookie", "addCookie", "requestHeaders", "getClientIP"] + spec=[ + "args", + "getCookie", + "addCookie", + "requestHeaders", + "getClientIP", + "get_user_agent", + ] ) state = "state" @@ -637,9 +650,8 @@ def test_extra_attributes(self): request.args[b"code"] = [b"code"] request.args[b"state"] = [state.encode("utf-8")] - request.requestHeaders = Mock(spec=["getRawHeaders"]) - request.requestHeaders.getRawHeaders.return_value = [b"Browser"] request.getClientIP.return_value = "10.0.0.1" + request.get_user_agent.return_value = "Browser" self.get_success(self.handler.handle_oidc_callback(request)) diff --git a/tests/logging/test_terse_json.py b/tests/logging/test_terse_json.py index 4cf81f71284b..fd128b88e0c0 100644 --- a/tests/logging/test_terse_json.py +++ b/tests/logging/test_terse_json.py @@ -78,7 +78,7 @@ def test_log_output(self): "server_name", "name", ] - self.assertEqual(set(log.keys()), set(expected_log_keys)) + self.assertCountEqual(log.keys(), expected_log_keys) # It contains the data we expect. self.assertEqual(log["name"], "wally") diff --git a/tests/push/test_email.py b/tests/push/test_email.py index 322456864065..d9993e624521 100644 --- a/tests/push/test_email.py +++ b/tests/push/test_email.py @@ -131,6 +131,35 @@ def test_simple_sends_email(self): # We should get emailed about that message self._check_for_mail() + def test_invite_sends_email(self): + # Create a room and invite the user to it + room = self.helper.create_room_as(self.others[0].id, tok=self.others[0].token) + self.helper.invite( + room=room, + src=self.others[0].id, + tok=self.others[0].token, + targ=self.user_id, + ) + + # We should get emailed about the invite + self._check_for_mail() + + def test_invite_to_empty_room_sends_email(self): + # Create a room and invite the user to it + room = self.helper.create_room_as(self.others[0].id, tok=self.others[0].token) + self.helper.invite( + room=room, + src=self.others[0].id, + tok=self.others[0].token, + targ=self.user_id, + ) + + # Then have the original user leave + self.helper.leave(room, self.others[0].id, tok=self.others[0].token) + + # We should get emailed about the invite + self._check_for_mail() + def test_multiple_members_email(self): # We want to test multiple notifications, so we pause processing of push # while we send messages. 
@@ -158,8 +187,21 @@ def test_multiple_members_email(self): # We should get emailed about those messages self._check_for_mail() + def test_encrypted_message(self): + room = self.helper.create_room_as(self.user_id, tok=self.access_token) + self.helper.invite( + room=room, src=self.user_id, tok=self.access_token, targ=self.others[0].id + ) + self.helper.join(room=room, user=self.others[0].id, tok=self.others[0].token) + + # The other user sends some messages + self.helper.send_event(room, "m.room.encrypted", {}, tok=self.others[0].token) + + # We should get emailed about that message + self._check_for_mail() + def _check_for_mail(self): - "Check that the user receives an email notification" + """Check that the user receives an email notification""" # Get the stream ordering before it gets sent pushers = self.get_success( diff --git a/tests/rest/admin/test_device.py b/tests/rest/admin/test_device.py index 92c9058887e2..d89eb90cfef5 100644 --- a/tests/rest/admin/test_device.py +++ b/tests/rest/admin/test_device.py @@ -393,6 +393,22 @@ def test_user_is_not_local(self): self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual("Can only lookup local users", channel.json_body["error"]) + def test_user_has_no_devices(self): + """ + Tests that a normal lookup for devices is successful + if the user has no devices + """ + + # Get devices + request, channel = self.make_request( + "GET", self.url, access_token=self.admin_user_tok, + ) + self.render(request) + + self.assertEqual(200, channel.code, msg=channel.json_body) + self.assertEqual(0, channel.json_body["total"]) + self.assertEqual(0, len(channel.json_body["devices"])) + def test_get_devices(self): """ Tests that a normal lookup for devices is successfully @@ -409,6 +425,7 @@ def test_get_devices(self): self.render(request) self.assertEqual(200, channel.code, msg=channel.json_body) + self.assertEqual(number_devices, channel.json_body["total"]) self.assertEqual(number_devices, len(channel.json_body["devices"])) self.assertEqual(self.other_user, channel.json_body["devices"][0]["user_id"]) # Check that all fields are available diff --git a/tests/rest/client/v2_alpha/test_account.py b/tests/rest/client/v2_alpha/test_account.py index ae2cd67f35de..66ac4dbe858d 100644 --- a/tests/rest/client/v2_alpha/test_account.py +++ b/tests/rest/client/v2_alpha/test_account.py @@ -352,7 +352,6 @@ def test_deactivate_account(self): self.render(request) self.assertEqual(request.code, 401) - @unittest.INFO def test_pending_invites(self): """Tests that deactivating a user rejects every pending invite for them.""" store = self.hs.get_datastore() diff --git a/tests/rest/client/v2_alpha/test_auth.py b/tests/rest/client/v2_alpha/test_auth.py index 293ccfba2bb1..86184f0d2ec7 100644 --- a/tests/rest/client/v2_alpha/test_auth.py +++ b/tests/rest/client/v2_alpha/test_auth.py @@ -104,7 +104,6 @@ def recaptcha( self.assertEqual(len(attempts), 1) self.assertEqual(attempts[0][0]["response"], "a") - @unittest.INFO def test_fallback_captcha(self): """Ensure that fallback auth via a captcha works.""" # Returns a 401 as per the spec diff --git a/tests/storage/test__base.py b/tests/storage/test__base.py index 8e69b1e9cc91..1ac4ebc61d4c 100644 --- a/tests/storage/test__base.py +++ b/tests/storage/test__base.py @@ -15,237 +15,9 @@ # limitations under the License.
-from mock import Mock - -from twisted.internet import defer - -from synapse.util.async_helpers import ObservableDeferred -from synapse.util.caches.descriptors import cached - from tests import unittest -class CacheDecoratorTestCase(unittest.HomeserverTestCase): - @defer.inlineCallbacks - def test_passthrough(self): - class A: - @cached() - def func(self, key): - return key - - a = A() - - self.assertEquals((yield a.func("foo")), "foo") - self.assertEquals((yield a.func("bar")), "bar") - - @defer.inlineCallbacks - def test_hit(self): - callcount = [0] - - class A: - @cached() - def func(self, key): - callcount[0] += 1 - return key - - a = A() - yield a.func("foo") - - self.assertEquals(callcount[0], 1) - - self.assertEquals((yield a.func("foo")), "foo") - self.assertEquals(callcount[0], 1) - - @defer.inlineCallbacks - def test_invalidate(self): - callcount = [0] - - class A: - @cached() - def func(self, key): - callcount[0] += 1 - return key - - a = A() - yield a.func("foo") - - self.assertEquals(callcount[0], 1) - - a.func.invalidate(("foo",)) - - yield a.func("foo") - - self.assertEquals(callcount[0], 2) - - def test_invalidate_missing(self): - class A: - @cached() - def func(self, key): - return key - - A().func.invalidate(("what",)) - - @defer.inlineCallbacks - def test_max_entries(self): - callcount = [0] - - class A: - @cached(max_entries=10) - def func(self, key): - callcount[0] += 1 - return key - - a = A() - - for k in range(0, 12): - yield a.func(k) - - self.assertEquals(callcount[0], 12) - - # There must have been at least 2 evictions, meaning if we calculate - # all 12 values again, we must get called at least 2 more times - for k in range(0, 12): - yield a.func(k) - - self.assertTrue( - callcount[0] >= 14, msg="Expected callcount >= 14, got %d" % (callcount[0]) - ) - - def test_prefill(self): - callcount = [0] - - d = defer.succeed(123) - - class A: - @cached() - def func(self, key): - callcount[0] += 1 - return d - - a = A() - - a.func.prefill(("foo",), ObservableDeferred(d)) - - self.assertEquals(a.func("foo").result, d.result) - self.assertEquals(callcount[0], 0) - - @defer.inlineCallbacks - def test_invalidate_context(self): - callcount = [0] - callcount2 = [0] - - class A: - @cached() - def func(self, key): - callcount[0] += 1 - return key - - @cached(cache_context=True) - def func2(self, key, cache_context): - callcount2[0] += 1 - return self.func(key, on_invalidate=cache_context.invalidate) - - a = A() - yield a.func2("foo") - - self.assertEquals(callcount[0], 1) - self.assertEquals(callcount2[0], 1) - - a.func.invalidate(("foo",)) - yield a.func("foo") - - self.assertEquals(callcount[0], 2) - self.assertEquals(callcount2[0], 1) - - yield a.func2("foo") - - self.assertEquals(callcount[0], 2) - self.assertEquals(callcount2[0], 2) - - @defer.inlineCallbacks - def test_eviction_context(self): - callcount = [0] - callcount2 = [0] - - class A: - @cached(max_entries=2) - def func(self, key): - callcount[0] += 1 - return key - - @cached(cache_context=True) - def func2(self, key, cache_context): - callcount2[0] += 1 - return self.func(key, on_invalidate=cache_context.invalidate) - - a = A() - yield a.func2("foo") - yield a.func2("foo2") - - self.assertEquals(callcount[0], 2) - self.assertEquals(callcount2[0], 2) - - yield a.func2("foo") - self.assertEquals(callcount[0], 2) - self.assertEquals(callcount2[0], 2) - - yield a.func("foo3") - - self.assertEquals(callcount[0], 3) - self.assertEquals(callcount2[0], 2) - - yield a.func2("foo") - - self.assertEquals(callcount[0], 4) - 
self.assertEquals(callcount2[0], 3) - - @defer.inlineCallbacks - def test_double_get(self): - callcount = [0] - callcount2 = [0] - - class A: - @cached() - def func(self, key): - callcount[0] += 1 - return key - - @cached(cache_context=True) - def func2(self, key, cache_context): - callcount2[0] += 1 - return self.func(key, on_invalidate=cache_context.invalidate) - - a = A() - a.func2.cache.cache = Mock(wraps=a.func2.cache.cache) - - yield a.func2("foo") - - self.assertEquals(callcount[0], 1) - self.assertEquals(callcount2[0], 1) - - a.func2.invalidate(("foo",)) - self.assertEquals(a.func2.cache.cache.pop.call_count, 1) - - yield a.func2("foo") - a.func2.invalidate(("foo",)) - self.assertEquals(a.func2.cache.cache.pop.call_count, 2) - - self.assertEquals(callcount[0], 1) - self.assertEquals(callcount2[0], 2) - - a.func.invalidate(("foo",)) - self.assertEquals(a.func2.cache.cache.pop.call_count, 3) - yield a.func("foo") - - self.assertEquals(callcount[0], 2) - self.assertEquals(callcount2[0], 2) - - yield a.func2("foo") - - self.assertEquals(callcount[0], 2) - self.assertEquals(callcount2[0], 3) - - class UpsertManyTests(unittest.HomeserverTestCase): def prepare(self, reactor, clock, hs): self.storage = hs.get_datastore() diff --git a/tests/storage/test_appservice.py b/tests/storage/test_appservice.py index c5c79873495d..1ce29af5fd9d 100644 --- a/tests/storage/test_appservice.py +++ b/tests/storage/test_appservice.py @@ -410,6 +410,62 @@ def test_get_appservices_by_state_multiple(self): ) +class ApplicationServiceStoreTypeStreamIds(unittest.HomeserverTestCase): + def make_homeserver(self, reactor, clock): + hs = self.setup_test_homeserver() + return hs + + def prepare(self, hs, reactor, clock): + self.service = Mock(id="foo") + self.store = self.hs.get_datastore() + self.get_success(self.store.set_appservice_state(self.service, "up")) + + def test_get_type_stream_id_for_appservice_no_value(self): + value = self.get_success( + self.store.get_type_stream_id_for_appservice(self.service, "read_receipt") + ) + self.assertEquals(value, 0) + + value = self.get_success( + self.store.get_type_stream_id_for_appservice(self.service, "presence") + ) + self.assertEquals(value, 0) + + def test_get_type_stream_id_for_appservice_invalid_type(self): + self.get_failure( + self.store.get_type_stream_id_for_appservice(self.service, "foobar"), + ValueError, + ) + + def test_set_type_stream_id_for_appservice(self): + read_receipt_value = 1024 + self.get_success( + self.store.set_type_stream_id_for_appservice( + self.service, "read_receipt", read_receipt_value + ) + ) + result = self.get_success( + self.store.get_type_stream_id_for_appservice(self.service, "read_receipt") + ) + self.assertEqual(result, read_receipt_value) + + self.get_success( + self.store.set_type_stream_id_for_appservice( + self.service, "presence", read_receipt_value + ) + ) + result = self.get_success( + self.store.get_type_stream_id_for_appservice(self.service, "presence") + ) + self.assertEqual(result, read_receipt_value) + + def test_set_type_stream_id_for_appservice_invalid_type(self): + self.get_failure( + self.store.set_type_stream_id_for_appservice(self.service, "foobar", 1024), + ValueError, + ) + + # required for ApplicationServiceTransactionStoreTestCase tests class TestTransactionStore(ApplicationServiceTransactionStore, ApplicationServiceStore): def __init__(self, database: DatabasePool, db_conn, hs): diff --git a/tests/storage/test_cleanup_extrems.py b/tests/storage/test_cleanup_extrems.py index 080761d1d2dc..5a1e5c4e66c8 100644 
--- a/tests/storage/test_cleanup_extrems.py +++ b/tests/storage/test_cleanup_extrems.py @@ -22,7 +22,7 @@ from synapse.api.constants import EventTypes from synapse.rest.client.v1 import login, room from synapse.storage import prepare_database -from synapse.types import Requester, UserID +from synapse.types import UserID, create_requester from tests.unittest import HomeserverTestCase @@ -38,7 +38,7 @@ def prepare(self, reactor, clock, homeserver): # Create a test user and room self.user = UserID("alice", "test") - self.requester = Requester(self.user, None, False, False, None, None) + self.requester = create_requester(self.user) info, _ = self.get_success(self.room_creator.create_room(self.requester, {})) self.room_id = info["room_id"] @@ -260,7 +260,7 @@ def prepare(self, reactor, clock, homeserver): # Create a test user and room self.user = UserID.from_string(self.register_user("user1", "password")) self.token1 = self.login("user1", "password") - self.requester = Requester(self.user, None, False, False, None, None) + self.requester = create_requester(self.user) info, _ = self.get_success(self.room_creator.create_room(self.requester, {})) self.room_id = info["room_id"] self.event_creator = homeserver.get_event_creation_handler() diff --git a/tests/storage/test_event_metrics.py b/tests/storage/test_event_metrics.py index 3957471f3fbc..7691f2d790fb 100644 --- a/tests/storage/test_event_metrics.py +++ b/tests/storage/test_event_metrics.py @@ -14,7 +14,7 @@ # limitations under the License. from synapse.metrics import REGISTRY, generate_latest -from synapse.types import Requester, UserID +from synapse.types import UserID, create_requester from tests.unittest import HomeserverTestCase @@ -27,7 +27,7 @@ def test_exposed_to_prometheus(self): room_creator = self.hs.get_room_creation_handler() user = UserID("alice", "test") - requester = Requester(user, None, False, False, None, None) + requester = create_requester(user) # Real events, forward extremities events = [(3, 2), (6, 2), (4, 6)] diff --git a/tests/storage/test_roommember.py b/tests/storage/test_roommember.py index 12ccc1f53e99..ff972daeaa68 100644 --- a/tests/storage/test_roommember.py +++ b/tests/storage/test_roommember.py @@ -19,7 +19,7 @@ from synapse.api.constants import Membership from synapse.rest.admin import register_servlets_for_client_rest_resource from synapse.rest.client.v1 import login, room -from synapse.types import Requester, UserID +from synapse.types import UserID, create_requester from tests import unittest from tests.test_utils import event_injection @@ -187,7 +187,7 @@ def test_can_rerun_update(self): # Now let's create a room, which will insert a membership user = UserID("alice", "test") - requester = Requester(user, None, False, False, None, None) + requester = create_requester(user) self.get_success(self.room_creator.create_room(requester, {})) # Register the background update to run again. 
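The recurring substitution in the storage tests above, positional `Requester(user, None, False, False, None, None)` replaced by `create_requester(user)`, centralises the default values in one helper, so call sites no longer break whenever a field is added to `Requester`. A rough sketch of the pattern with simplified stand-in types (field names here are assumptions for illustration, not Synapse's actual definitions):

```python
# Simplified stand-ins illustrating the factory-with-defaults pattern the
# tests above migrate to; these are NOT Synapse's real classes.
from typing import NamedTuple, Optional


class Requester(NamedTuple):
    user: str
    access_token_id: Optional[int]
    is_guest: bool
    shadow_banned: bool
    device_id: Optional[str]
    app_service: Optional[object]


def create_requester(
    user: str,
    access_token_id: Optional[int] = None,
    is_guest: bool = False,
    shadow_banned: bool = False,
    device_id: Optional[str] = None,
    app_service: Optional[object] = None,
) -> Requester:
    # One place owns the defaults; adding a field only touches this helper.
    return Requester(
        user, access_token_id, is_guest, shadow_banned, device_id, app_service
    )


# Before: every caller spelled out all six positional fields.
# requester = Requester("@alice:test", None, False, False, None, None)
# After:
requester = create_requester("@alice:test")
assert not requester.is_guest
```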
diff --git a/tests/test_federation.py b/tests/test_federation.py index d39e79258041..1ce4ea3a0142 100644 --- a/tests/test_federation.py +++ b/tests/test_federation.py @@ -20,7 +20,7 @@ from synapse.api.errors import FederationError from synapse.events import make_event_from_dict from synapse.logging.context import LoggingContext -from synapse.types import Requester, UserID +from synapse.types import UserID, create_requester from synapse.util import Clock from synapse.util.retryutils import NotRetryingDestination @@ -43,7 +43,7 @@ def setUp(self): ) user_id = UserID("us", "test") - our_user = Requester(user_id, None, False, False, None, None) + our_user = create_requester(user_id) room_creator = self.homeserver.get_room_creation_handler() self.room_id = self.get_success( room_creator.create_room( diff --git a/tests/unittest.py b/tests/unittest.py index 040b126a27bc..257f46589763 100644 --- a/tests/unittest.py +++ b/tests/unittest.py @@ -44,7 +44,7 @@ set_current_context, ) from synapse.server import HomeServer -from synapse.types import Requester, UserID, create_requester +from synapse.types import UserID, create_requester from synapse.util.ratelimitutils import FederationRateLimiter from tests.server import ( @@ -627,7 +627,7 @@ def create_and_send_event( """ event_creator = self.hs.get_event_creation_handler() secrets = self.hs.get_secrets() - requester = Requester(user, None, False, False, None, None) + requester = create_requester(user) event, context = self.get_success( event_creator.create_event( diff --git a/tests/util/caches/test_deferred_cache.py b/tests/util/caches/test_deferred_cache.py index 9717be56b6cd..dadfabd46d1d 100644 --- a/tests/util/caches/test_deferred_cache.py +++ b/tests/util/caches/test_deferred_cache.py @@ -13,15 +13,16 @@ # See the License for the specific language governing permissions and # limitations under the License. -import unittest from functools import partial from twisted.internet import defer from synapse.util.caches.deferred_cache import DeferredCache +from tests.unittest import TestCase -class DeferredCacheTestCase(unittest.TestCase): + +class DeferredCacheTestCase(TestCase): def test_empty(self): cache = DeferredCache("test") failed = False @@ -36,7 +37,118 @@ def test_hit(self): cache = DeferredCache("test") cache.prefill("foo", 123) - self.assertEquals(cache.get("foo"), 123) + self.assertEquals(self.successResultOf(cache.get("foo")), 123) + + def test_hit_deferred(self): + cache = DeferredCache("test") + origin_d = defer.Deferred() + set_d = cache.set("k1", origin_d) + + # get should return an incomplete deferred + get_d = cache.get("k1") + self.assertFalse(get_d.called) + + # add a callback that will make sure that the set_d gets called before the get_d + def check1(r): + self.assertTrue(set_d.called) + return r + + # TODO: Actually ObservableDeferred *doesn't* run its callbacks in order on py3.8. + # maybe we should fix that?
+ # get_d.addCallback(check1) + + # now fire off all the deferreds + origin_d.callback(99) + self.assertEqual(self.successResultOf(origin_d), 99) + self.assertEqual(self.successResultOf(set_d), 99) + self.assertEqual(self.successResultOf(get_d), 99) + + def test_callbacks(self): + """Invalidation callbacks are called at the right time""" + cache = DeferredCache("test") + callbacks = set() + + # start with an entry, with a callback + cache.prefill("k1", 10, callback=lambda: callbacks.add("prefill")) + + # now replace that entry with a pending result + origin_d = defer.Deferred() + set_d = cache.set("k1", origin_d, callback=lambda: callbacks.add("set")) + + # ... and also make a get request + get_d = cache.get("k1", callback=lambda: callbacks.add("get")) + + # we don't expect the invalidation callback for the original value to have + # been called yet, even though get() will now return a different result. + # I'm not sure if that is by design or not. + self.assertEqual(callbacks, set()) + + # now fire off all the deferreds + origin_d.callback(20) + self.assertEqual(self.successResultOf(set_d), 20) + self.assertEqual(self.successResultOf(get_d), 20) + + # now the original invalidation callback should have been called, but none of + # the others + self.assertEqual(callbacks, {"prefill"}) + callbacks.clear() + + # another update should invalidate both the previous results + cache.prefill("k1", 30) + self.assertEqual(callbacks, {"set", "get"}) + + def test_set_fail(self): + cache = DeferredCache("test") + callbacks = set() + + # start with an entry, with a callback + cache.prefill("k1", 10, callback=lambda: callbacks.add("prefill")) + + # now replace that entry with a pending result + origin_d = defer.Deferred() + set_d = cache.set("k1", origin_d, callback=lambda: callbacks.add("set")) + + # ... and also make a get request + get_d = cache.get("k1", callback=lambda: callbacks.add("get")) + + # none of the callbacks should have been called yet + self.assertEqual(callbacks, set()) + + # oh noes! fails! + e = Exception("oops") + origin_d.errback(e) + self.assertIs(self.failureResultOf(set_d, Exception).value, e) + self.assertIs(self.failureResultOf(get_d, Exception).value, e) + + # the callbacks for the failed requests should have been called. + # I'm not sure if this is deliberate or not. + self.assertEqual(callbacks, {"get", "set"}) + callbacks.clear() + + # the old value should still be returned now? 
+ get_d2 = cache.get("k1", callback=lambda: callbacks.add("get2")) + self.assertEqual(self.successResultOf(get_d2), 10) + + # replacing the value now should run the callbacks for those requests + # which got the original result + cache.prefill("k1", 30) + self.assertEqual(callbacks, {"prefill", "get2"}) + + def test_get_immediate(self): + cache = DeferredCache("test") + d1 = defer.Deferred() + cache.set("key1", d1) + + # get_immediate should return default + v = cache.get_immediate("key1", 1) + self.assertEqual(v, 1) + + # now complete the set + d1.callback(2) + + # get_immediate should return result + v = cache.get_immediate("key1", 1) + self.assertEqual(v, 2) def test_invalidate(self): cache = DeferredCache("test") @@ -66,23 +178,24 @@ def record_callback(idx): d2 = defer.Deferred() cache.set("key2", d2, partial(record_callback, 1)) - # lookup should return observable deferreds - self.assertFalse(cache.get("key1").has_called()) - self.assertFalse(cache.get("key2").has_called()) + # lookup should return pending deferreds + self.assertFalse(cache.get("key1").called) + self.assertFalse(cache.get("key2").called) # let one of the lookups complete d2.callback("result2") - # for now at least, the cache will return real results rather than an - # observabledeferred - self.assertEqual(cache.get("key2"), "result2") + # now the cache will return a completed deferred + self.assertEqual(self.successResultOf(cache.get("key2")), "result2") # now do the invalidation cache.invalidate_all() - # lookup should return none - self.assertIsNone(cache.get("key1", None)) - self.assertIsNone(cache.get("key2", None)) + # lookup should fail + with self.assertRaises(KeyError): + cache.get("key1") + with self.assertRaises(KeyError): + cache.get("key2") # both callbacks should have been callbacked self.assertTrue(callback_record[0], "Invalidation callback for key1 not called") @@ -90,7 +203,8 @@ def record_callback(idx): # letting the other lookup complete should do nothing d1.callback("result1") - self.assertIsNone(cache.get("key1", None)) + with self.assertRaises(KeyError): + cache.get("key1", None) def test_eviction(self): cache = DeferredCache( diff --git a/tests/util/caches/test_descriptors.py b/tests/util/caches/test_descriptors.py index 3d1f960869da..2ad08f541bb9 100644 --- a/tests/util/caches/test_descriptors.py +++ b/tests/util/caches/test_descriptors.py @@ -14,6 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. import logging +from typing import Set import mock @@ -130,6 +131,57 @@ def fn(self, arg1): d = obj.fn(1) self.failureResultOf(d, SynapseError) + def test_cache_with_async_exception(self): + """The wrapped function returns a failure + """ + + class Cls: + result = None + call_count = 0 + + @cached() + def fn(self, arg1): + self.call_count += 1 + return self.result + + obj = Cls() + callbacks = set() # type: Set[str] + + # set off an asynchronous request + obj.result = origin_d = defer.Deferred() + + d1 = obj.fn(1, on_invalidate=lambda: callbacks.add("d1")) + self.assertFalse(d1.called) + + # a second request should also return a deferred, but should not call the + # function itself. + d2 = obj.fn(1, on_invalidate=lambda: callbacks.add("d2")) + self.assertFalse(d2.called) + self.assertEqual(obj.call_count, 1) + + # no callbacks yet + self.assertEqual(callbacks, set()) + + # the original request fails + e = Exception("bzz") + origin_d.errback(e) + + # ... 
which should cause the lookups to fail similarly + self.assertIs(self.failureResultOf(d1, Exception).value, e) + self.assertIs(self.failureResultOf(d2, Exception).value, e) + + # ... and the callbacks to have been, uh, called. + self.assertEqual(callbacks, {"d1", "d2"}) + + # ... leaving the cache empty + self.assertEqual(len(obj.fn.cache.cache), 0) + + # and a second call should work as normal + obj.result = defer.succeed(100) + d3 = obj.fn(1) + self.assertEqual(self.successResultOf(d3), 100) + self.assertEqual(obj.call_count, 2) + def test_cache_logcontexts(self): """Check that logcontexts are set and restored correctly when using the cache.""" @@ -311,6 +363,235 @@ def fn(self, arg1): self.failureResultOf(d, SynapseError) +class CacheDecoratorTestCase(unittest.HomeserverTestCase): + """More tests for @cached + + The following is a set of tests that got lost in a different file for a while. + + There are probably duplicates of the tests in DescriptorTestCase. Ideally the + duplicates would be removed and the two sets of classes combined. + """ + + @defer.inlineCallbacks + def test_passthrough(self): + class A: + @cached() + def func(self, key): + return key + + a = A() + + self.assertEquals((yield a.func("foo")), "foo") + self.assertEquals((yield a.func("bar")), "bar") + + @defer.inlineCallbacks + def test_hit(self): + callcount = [0] + + class A: + @cached() + def func(self, key): + callcount[0] += 1 + return key + + a = A() + yield a.func("foo") + + self.assertEquals(callcount[0], 1) + + self.assertEquals((yield a.func("foo")), "foo") + self.assertEquals(callcount[0], 1) + + @defer.inlineCallbacks + def test_invalidate(self): + callcount = [0] + + class A: + @cached() + def func(self, key): + callcount[0] += 1 + return key + + a = A() + yield a.func("foo") + + self.assertEquals(callcount[0], 1) + + a.func.invalidate(("foo",)) + + yield a.func("foo") + + self.assertEquals(callcount[0], 2) + + def test_invalidate_missing(self): + class A: + @cached() + def func(self, key): + return key + + A().func.invalidate(("what",)) + + @defer.inlineCallbacks + def test_max_entries(self): + callcount = [0] + + class A: + @cached(max_entries=10) + def func(self, key): + callcount[0] += 1 + return key + + a = A() + + for k in range(0, 12): + yield a.func(k) + + self.assertEquals(callcount[0], 12) + + # There must have been at least 2 evictions, meaning if we calculate + # all 12 values again, we must get called at least 2 more times + for k in range(0, 12): + yield a.func(k) + + self.assertTrue( + callcount[0] >= 14, msg="Expected callcount >= 14, got %d" % (callcount[0]) + ) + + def test_prefill(self): + callcount = [0] + + d = defer.succeed(123) + + class A: + @cached() + def func(self, key): + callcount[0] += 1 + return d + + a = A() + + a.func.prefill(("foo",), 456) + + self.assertEquals(a.func("foo").result, 456) + self.assertEquals(callcount[0], 0) + + @defer.inlineCallbacks + def test_invalidate_context(self): + callcount = [0] + callcount2 = [0] + + class A: + @cached() + def func(self, key): + callcount[0] += 1 + return key + + @cached(cache_context=True) + def func2(self, key, cache_context): + callcount2[0] += 1 + return self.func(key, on_invalidate=cache_context.invalidate) + + a = A() + yield a.func2("foo") + + self.assertEquals(callcount[0], 1) + self.assertEquals(callcount2[0], 1) + + a.func.invalidate(("foo",)) + yield a.func("foo") + + self.assertEquals(callcount[0], 2) + self.assertEquals(callcount2[0], 1) + + yield a.func2("foo") + + self.assertEquals(callcount[0], 2) + 
self.assertEquals(callcount2[0], 2) + + @defer.inlineCallbacks + def test_eviction_context(self): + callcount = [0] + callcount2 = [0] + + class A: + @cached(max_entries=2) + def func(self, key): + callcount[0] += 1 + return key + + @cached(cache_context=True) + def func2(self, key, cache_context): + callcount2[0] += 1 + return self.func(key, on_invalidate=cache_context.invalidate) + + a = A() + yield a.func2("foo") + yield a.func2("foo2") + + self.assertEquals(callcount[0], 2) + self.assertEquals(callcount2[0], 2) + + yield a.func2("foo") + self.assertEquals(callcount[0], 2) + self.assertEquals(callcount2[0], 2) + + yield a.func("foo3") + + self.assertEquals(callcount[0], 3) + self.assertEquals(callcount2[0], 2) + + yield a.func2("foo") + + self.assertEquals(callcount[0], 4) + self.assertEquals(callcount2[0], 3) + + @defer.inlineCallbacks + def test_double_get(self): + callcount = [0] + callcount2 = [0] + + class A: + @cached() + def func(self, key): + callcount[0] += 1 + return key + + @cached(cache_context=True) + def func2(self, key, cache_context): + callcount2[0] += 1 + return self.func(key, on_invalidate=cache_context.invalidate) + + a = A() + a.func2.cache.cache = mock.Mock(wraps=a.func2.cache.cache) + + yield a.func2("foo") + + self.assertEquals(callcount[0], 1) + self.assertEquals(callcount2[0], 1) + + a.func2.invalidate(("foo",)) + self.assertEquals(a.func2.cache.cache.pop.call_count, 1) + + yield a.func2("foo") + a.func2.invalidate(("foo",)) + self.assertEquals(a.func2.cache.cache.pop.call_count, 2) + + self.assertEquals(callcount[0], 1) + self.assertEquals(callcount2[0], 2) + + a.func.invalidate(("foo",)) + self.assertEquals(a.func2.cache.cache.pop.call_count, 3) + yield a.func("foo") + + self.assertEquals(callcount[0], 2) + self.assertEquals(callcount2[0], 2) + + yield a.func2("foo") + + self.assertEquals(callcount[0], 2) + self.assertEquals(callcount2[0], 3) + + class CachedListDescriptorTestCase(unittest.TestCase): @defer.inlineCallbacks def test_cache(self): diff --git a/tests/util/test_lrucache.py b/tests/util/test_lrucache.py index f12834edab2c..a739a6aaaf8c 100644 --- a/tests/util/test_lrucache.py +++ b/tests/util/test_lrucache.py @@ -19,7 +19,8 @@ from synapse.util.caches.lrucache import LruCache from synapse.util.caches.treecache import TreeCache -from .. import unittest +from tests import unittest +from tests.unittest import override_config class LruCacheTestCase(unittest.HomeserverTestCase): @@ -83,6 +84,11 @@ def test_clear(self): cache.clear() self.assertEquals(len(cache), 0) + @override_config({"caches": {"per_cache_factors": {"mycache": 10}}}) + def test_special_size(self): + cache = LruCache(10, "mycache") + self.assertEqual(cache.max_size, 100) + class LruCacheCallbacksTestCase(unittest.HomeserverTestCase): def test_get(self): diff --git a/tox.ini b/tox.ini index 4d132eff4cab..6dcc439a4038 100644 --- a/tox.ini +++ b/tox.ini @@ -158,12 +158,9 @@ commands= coverage html [testenv:mypy] -skip_install = True deps = {[base]deps} - mypy==0.782 - mypy-zope -extras = all +extras = all,mypy commands = mypy # To find all folders that pass mypy you run:
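One behavioural detail pinned down above: the new `test_special_size` in the LruCache tests asserts that a cache declared with size 10 ends up with `max_size` 100 once `per_cache_factors` maps its name to 10. A rough model of that sizing rule follows (a stand-in, not Synapse's implementation; the 0.5 default factor is an assumption):

```python
# Stand-in for the per-cache resizing behaviour exercised by
# test_special_size: declared size times the cache's configured factor.
from typing import Dict


def effective_max_size(
    declared_size: int,
    cache_name: str,
    per_cache_factors: Dict[str, float],
    global_factor: float = 0.5,  # assumed default, not verified here
) -> int:
    # A per-cache factor overrides the global one; otherwise fall back.
    factor = per_cache_factors.get(cache_name, global_factor)
    return int(declared_size * factor)


# Mirrors the test's expectation: factor 10 scales a size-10 cache to 100.
assert effective_max_size(10, "mycache", {"mycache": 10}) == 100
assert effective_max_size(10, "other", {"mycache": 10}) == 5
```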