From 538b3576b34a69cdc786a5919b3b46b9f5e94387 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Fri, 14 Aug 2020 17:09:37 +0100 Subject: [PATCH 01/10] Improve and better document the computation of max_lifetime when purging --- synapse/handlers/pagination.py | 32 ++++++++++++++++++++++++++------ 1 file changed, 26 insertions(+), 6 deletions(-) diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py index 487420bb5d4d..2b608bd66915 100644 --- a/synapse/handlers/pagination.py +++ b/synapse/handlers/pagination.py @@ -82,6 +82,9 @@ def __init__(self, hs): self._retention_default_max_lifetime = hs.config.retention_default_max_lifetime + self._retention_allowed_lifetime_min = hs.config.retention_allowed_lifetime_min + self._retention_allowed_lifetime_max = hs.config.retention_allowed_lifetime_max + if hs.config.retention_enabled: # Run the purge jobs described in the configuration file. for job in hs.config.retention_purge_jobs: @@ -152,13 +155,30 @@ async def purge_history_for_rooms_in_range(self, min_ms, max_ms): ) continue - max_lifetime = retention_policy["max_lifetime"] + # If max_lifetime is None, it means that the room has no retention policy. + # Given we only retrieve such rooms when there's a default retention policy + # defined in the server's configuration, we can safely assume that's the + # case and use it for this room. + max_lifetime = ( + retention_policy["max_lifetime"] or self._retention_default_max_lifetime + ) - if max_lifetime is None: - # If max_lifetime is None, it means that include_null equals True, - # therefore we can safely assume that there is a default policy defined - # in the server's configuration. - max_lifetime = self._retention_default_max_lifetime + # Cap the effective max_lifetime to be within the range allowed in the + # config. + # We do this in two steps: + # 1. Make sure it's higher or equal to the minimum allowed value, and if + # it's not replace it with that value. This is because the server + # operator can be required to not delete information before a given + # time, e.g. to comply with freedom of information laws. + # 2. Make sure the resulting value is lower or equal to the maximum allowed + # value, and if it's not replace it with that value. This is because the + # server operator can be required to delete any data after a specific + # amount of time. + if self._retention_allowed_lifetime_min is not None: + max_lifetime = max(self._retention_allowed_lifetime_min, max_lifetime) + + if self._retention_allowed_lifetime_max is not None: + max_lifetime = min(max_lifetime, self._retention_allowed_lifetime_max) # Figure out what token we should start purging at. 
ts = self.clock.time_msec() - max_lifetime From 125d6e3d7484606af8c7b4b27d2af07881b02857 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Fri, 14 Aug 2020 17:20:16 +0100 Subject: [PATCH 02/10] Don't reject an event that doesn't match the current local config --- synapse/events/validator.py | 59 ++----------------------------------- 1 file changed, 3 insertions(+), 56 deletions(-) diff --git a/synapse/events/validator.py b/synapse/events/validator.py index 588d222f363d..5ce3874fbaf5 100644 --- a/synapse/events/validator.py +++ b/synapse/events/validator.py @@ -74,15 +74,14 @@ def validate_new(self, event, config): ) if event.type == EventTypes.Retention: - self._validate_retention(event, config) + self._validate_retention(event) - def _validate_retention(self, event, config): + def _validate_retention(self, event): """Checks that an event that defines the retention policy for a room respects the - boundaries imposed by the server's administrator. + format enforced by the spec. Args: event (FrozenEvent): The event to validate. - config (Config): The homeserver's configuration. """ min_lifetime = event.content.get("min_lifetime") max_lifetime = event.content.get("max_lifetime") @@ -95,32 +94,6 @@ def _validate_retention(self, event, config): errcode=Codes.BAD_JSON, ) - if ( - config.retention_allowed_lifetime_min is not None - and min_lifetime < config.retention_allowed_lifetime_min - ): - raise SynapseError( - code=400, - msg=( - "'min_lifetime' can't be lower than the minimum allowed" - " value enforced by the server's administrator" - ), - errcode=Codes.BAD_JSON, - ) - - if ( - config.retention_allowed_lifetime_max is not None - and min_lifetime > config.retention_allowed_lifetime_max - ): - raise SynapseError( - code=400, - msg=( - "'min_lifetime' can't be greater than the maximum allowed" - " value enforced by the server's administrator" - ), - errcode=Codes.BAD_JSON, - ) - if max_lifetime is not None: if not isinstance(max_lifetime, int): raise SynapseError( @@ -129,32 +102,6 @@ def _validate_retention(self, event, config): errcode=Codes.BAD_JSON, ) - if ( - config.retention_allowed_lifetime_min is not None - and max_lifetime < config.retention_allowed_lifetime_min - ): - raise SynapseError( - code=400, - msg=( - "'max_lifetime' can't be lower than the minimum allowed value" - " enforced by the server's administrator" - ), - errcode=Codes.BAD_JSON, - ) - - if ( - config.retention_allowed_lifetime_max is not None - and max_lifetime > config.retention_allowed_lifetime_max - ): - raise SynapseError( - code=400, - msg=( - "'max_lifetime' can't be greater than the maximum allowed" - " value enforced by the server's administrator" - ), - errcode=Codes.BAD_JSON, - ) - if ( min_lifetime is not None and max_lifetime is not None From e0d5183e731e205c6b1b04cf53fa427a1d765a51 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Fri, 14 Aug 2020 18:45:58 +0100 Subject: [PATCH 03/10] Fix tests --- synapse/handlers/pagination.py | 2 + tests/rest/client/test_retention.py | 72 ++++++++++++++++++----------- 2 files changed, 48 insertions(+), 26 deletions(-) diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py index 2b608bd66915..06e08ea83dbf 100644 --- a/synapse/handlers/pagination.py +++ b/synapse/handlers/pagination.py @@ -180,6 +180,8 @@ async def purge_history_for_rooms_in_range(self, min_ms, max_ms): if self._retention_allowed_lifetime_max is not None: max_lifetime = min(max_lifetime, self._retention_allowed_lifetime_max) + logger.debug("[purge] max_lifetime for room 
%s: %s", room_id, max_lifetime) + # Figure out what token we should start purging at. ts = self.clock.time_msec() - max_lifetime diff --git a/tests/rest/client/test_retention.py b/tests/rest/client/test_retention.py index e54ffea1505d..9f10f8ed0e68 100644 --- a/tests/rest/client/test_retention.py +++ b/tests/rest/client/test_retention.py @@ -45,50 +45,66 @@ def make_homeserver(self, reactor, clock): } self.hs = self.setup_test_homeserver(config=config) + return self.hs def prepare(self, reactor, clock, homeserver): self.user_id = self.register_user("user", "password") self.token = self.login("user", "password") - def test_retention_state_event(self): - """Tests that the server configuration can limit the values a user can set to the - room's retention policy. + self.store = self.hs.get_datastore() + self.serializer = self.hs.get_event_client_serializer() + self.clock = self.hs.get_clock() + + def test_retention_event_purged_with_state_event(self): + """Tests that expired events are correctly purged when the room's retention policy + is defined by a state event. """ room_id = self.helper.create_room_as(self.user_id, tok=self.token) + # Set the room's retention period to 2 days. + lifetime = one_day_ms * 2 self.helper.send_state( room_id=room_id, event_type=EventTypes.Retention, - body={"max_lifetime": one_day_ms * 4}, + body={"max_lifetime": lifetime}, tok=self.token, - expect_code=400, ) + self._test_retention_event_purged(room_id, one_day_ms * 1.5) + + def test_retention_event_purged_with_state_event_outside_allowed(self): + """Tests that the server configuration can override the policy for a room when + running the purge jobs. + """ + room_id = self.helper.create_room_as(self.user_id, tok=self.token) + + # Set a max_lifetime higher than the maximum allowed value. self.helper.send_state( room_id=room_id, event_type=EventTypes.Retention, - body={"max_lifetime": one_hour_ms}, + body={"max_lifetime": one_day_ms * 4}, tok=self.token, - expect_code=400, ) - def test_retention_event_purged_with_state_event(self): - """Tests that expired events are correctly purged when the room's retention policy - is defined by a state event. - """ - room_id = self.helper.create_room_as(self.user_id, tok=self.token) + # Check that the event is purged after waiting for the maximum allowed duration + # instead of the one specified in the room's policy. + self._test_retention_event_purged(room_id, one_day_ms * 3) - # Set the room's retention period to 2 days. - lifetime = one_day_ms * 2 + # Set a max_lifetime lower than the minimum allowed value. self.helper.send_state( room_id=room_id, event_type=EventTypes.Retention, - body={"max_lifetime": lifetime}, + body={"max_lifetime": one_hour_ms}, tok=self.token, ) - self._test_retention_event_purged(room_id, one_day_ms * 1.5) + # Check that the event hasn't been purged yet. + self._test_retention_event_purged(room_id, one_hour_ms, expect_purged=False) + + # Check that the event is purged after waiting for the minimum allowed duration + # instead of the one specified in the room's policy. + self._test_retention_event_purged(room_id, one_day_ms) def test_retention_event_purged_without_state_event(self): """Tests that expired events are correctly purged when the room's retention policy @@ -140,7 +156,7 @@ def test_visibility(self): # That event should be the second, not outdated event. 
self.assertEqual(filtered_events[0].event_id, valid_event_id, filtered_events) - def _test_retention_event_purged(self, room_id, increment): + def _test_retention_event_purged(self, room_id, increment, expect_purged=True): # Get the create event to, later, check that we can still access it. message_handler = self.hs.get_message_handler() create_event = self.get_success( @@ -154,7 +170,7 @@ def _test_retention_event_purged(self, room_id, increment): expired_event_id = resp.get("event_id") # Check that we can retrieve the event. - expired_event = self.get_event(room_id, expired_event_id) + expired_event = self.get_event(expired_event_id) self.assertEqual( expired_event.get("content", {}).get("body"), "1", expired_event ) @@ -173,25 +189,29 @@ def _test_retention_event_purged(self, room_id, increment): self.reactor.advance(increment / 1000) # Check that the event has been purged from the database. - self.get_event(room_id, expired_event_id, expected_code=404) + self.get_event(expired_event_id, expect_none=expect_purged) # Check that the event that hasn't been purged can still be retrieved. - valid_event = self.get_event(room_id, valid_event_id) + valid_event = self.get_event(valid_event_id) self.assertEqual(valid_event.get("content", {}).get("body"), "2", valid_event) # Check that we can still access state events that were sent before the event that # has been purged. self.get_event(room_id, create_event.event_id) - def get_event(self, room_id, event_id, expected_code=200): - url = "/_matrix/client/r0/rooms/%s/event/%s" % (room_id, event_id) + def get_event(self, event_id, expect_none=False): + event = self.get_success(self.store.get_event(event_id, allow_none=True)) - request, channel = self.make_request("GET", url, access_token=self.token) - self.render(request) + if expect_none: + self.assertIsNone(event) + return {} - self.assertEqual(channel.code, expected_code, channel.result) + self.assertIsNotNone(event) - return channel.json_body + time_now = self.clock.time_msec() + serialized = self.get_success(self.serializer.serialize_event(event, time_now)) + + return serialized class RetentionNoDefaultPolicyTestCase(unittest.HomeserverTestCase): From b426a52b769fc5753d087aaa3f607e0db496e57d Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Mon, 17 Aug 2020 17:23:39 +0100 Subject: [PATCH 04/10] Add config tips --- docs/sample_config.yaml | 5 +++++ synapse/config/server.py | 5 +++++ 2 files changed, 10 insertions(+) diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index fe85978a1fb1..76f18c739278 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -408,6 +408,11 @@ retention: # (e.g. every 12h), but not want that purge to be performed by a job that's # iterating over every room it knows, which could be heavy on the server. # + # If any purge job is configured, it is strongly recommended to have at least + # a single job with neither 'shortest_max_lifetime' nor 'longest_max_lifetime' + # set, or one job without 'shortest_max_lifetime' and one job without + # 'longest_max_lifetime' set. + # #purge_jobs: # - shortest_max_lifetime: 1d # longest_max_lifetime: 3d diff --git a/synapse/config/server.py b/synapse/config/server.py index 848587d2323c..1fe7e30a1d57 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -979,6 +979,11 @@ def generate_config_section( # (e.g. every 12h), but not want that purge to be performed by a job that's # iterating over every room it knows, which could be heavy on the server. 
# + # If any purge job is configured, it is strongly recommended to have at least + # a single job with neither 'shortest_max_lifetime' nor 'longest_max_lifetime' + # set, or one job without 'shortest_max_lifetime' and one job without + # 'longest_max_lifetime' set. + # #purge_jobs: # - shortest_max_lifetime: 1d # longest_max_lifetime: 3d From 41aa8ee9547d25501ba8857c349b21afdd717a6e Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Mon, 17 Aug 2020 17:24:39 +0100 Subject: [PATCH 05/10] Changelog --- changelog.d/8104.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/8104.bugfix diff --git a/changelog.d/8104.bugfix b/changelog.d/8104.bugfix new file mode 100644 index 000000000000..e32e2996c447 --- /dev/null +++ b/changelog.d/8104.bugfix @@ -0,0 +1 @@ +Fix a bug introduced in v1.7.2 impacting message retention policies that would allow federated homeservers to dictate a retention period that's lower than the configured minimum allowed duration in the configuration file. From f8206a4d4c553ee460aecc5087dad44ad75d7a68 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Mon, 17 Aug 2020 19:05:37 +0100 Subject: [PATCH 06/10] Incorporate review --- synapse/config/server.py | 9 +++---- tests/rest/client/test_retention.py | 40 ++++++++++++++++++++++------- 2 files changed, 35 insertions(+), 14 deletions(-) diff --git a/synapse/config/server.py b/synapse/config/server.py index 1fe7e30a1d57..77ab6de7d3a2 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -949,11 +949,10 @@ def generate_config_section( # min_lifetime: 1d # max_lifetime: 1y - # Retention policy limits. If set, a user won't be able to send a - # 'm.room.retention' event which features a 'min_lifetime' or a 'max_lifetime' - # that's not within this range. This is especially useful in closed federations, - # in which server admins can make sure every federating server applies the same - # rules. + # Retention policy limits. If set, and the state of a room contains a + # 'm.room.retention' event in its state which contains a 'min_lifetime' or a + # 'max_lifetime' that's out of these bounds, Synapse will cap the room's policy + # to these limits when running purge jobs. # #allowed_lifetime_min: 1d #allowed_lifetime_max: 1y diff --git a/tests/rest/client/test_retention.py b/tests/rest/client/test_retention.py index 9f10f8ed0e68..ccc96f61b7f7 100644 --- a/tests/rest/client/test_retention.py +++ b/tests/rest/client/test_retention.py @@ -89,7 +89,7 @@ def test_retention_event_purged_with_state_event_outside_allowed(self): # Check that the event is purged after waiting for the maximum allowed duration # instead of the one specified in the room's policy. - self._test_retention_event_purged(room_id, one_day_ms * 3) + self._test_retention_event_purged(room_id, one_day_ms * 1.5) # Set a max_lifetime lower than the minimum allowed value. self.helper.send_state( @@ -99,12 +99,9 @@ def test_retention_event_purged_with_state_event_outside_allowed(self): tok=self.token, ) - # Check that the event hasn't been purged yet. - self._test_retention_event_purged(room_id, one_hour_ms, expect_purged=False) - # Check that the event is purged after waiting for the minimum allowed duration # instead of the one specified in the room's policy. 
- self._test_retention_event_purged(room_id, one_day_ms) + self._test_retention_event_purged(room_id, one_day_ms * 0.5) def test_retention_event_purged_without_state_event(self): """Tests that expired events are correctly purged when the room's retention policy @@ -156,7 +153,31 @@ def test_visibility(self): # That event should be the second, not outdated event. self.assertEqual(filtered_events[0].event_id, valid_event_id, filtered_events) - def _test_retention_event_purged(self, room_id, increment, expect_purged=True): + def _test_retention_event_purged(self, room_id: str, increment: float): + """Run the following test scenario to test the message retention policy support: + + 1. Send event 1 + 2. Increment time by `increment` + 3. Send event 2 + 4. Increment time by `increment` + 5. Check that event 1 has been purged + 6. Check that event 2 has not been purged + 7. Check that state events that were sent before event 1 aren't purged. + + + The main reason for sending a second event is because currently Synapse won't + purge the latest message in a room because it would otherwise result in a lack of + forward extremities for this room. It's also a good thing to have (and with the + current ordering and checks) because if we eventually fix that, it acts as a + check that the purge jobs aren't too greedy and purge messages they shouldn't + purge. + + Args: + room_id: The ID of the room to test retention in. + increment: The number of milliseconds to advance the clock each time. Must be + defined so that events in the room aren't purged if they are `increment` + old but are purged if they are `increment * 2` old. + """ # Get the create event to, later, check that we can still access it. message_handler = self.hs.get_message_handler() create_event = self.get_success( @@ -188,10 +209,11 @@ def _test_retention_event_purged(self, room_id, increment, expect_purged=True): # one should still be kept. self.reactor.advance(increment / 1000) - # Check that the event has been purged from the database. - self.get_event(expired_event_id, expect_none=expect_purged) + # Check that the first event has been purged from the database, i.e. that we + # can't retrieve it anymore, because it has expired. + self.get_event(expired_event_id, expect_none=True) - # Check that the event that hasn't been purged can still be retrieved. + # Check that the event that hasn't expired can still be retrieved. valid_event = self.get_event(valid_event_id) self.assertEqual(valid_event.get("content", {}).get("body"), "2", valid_event) From 7e1221627c6a7082953957b6a3df6bc66dcdb472 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Mon, 17 Aug 2020 23:18:41 +0100 Subject: [PATCH 07/10] Config --- docs/sample_config.yaml | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index 76f18c739278..8159f1b8ea4c 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -378,11 +378,10 @@ retention: # min_lifetime: 1d # max_lifetime: 1y - # Retention policy limits. If set, a user won't be able to send a - # 'm.room.retention' event which features a 'min_lifetime' or a 'max_lifetime' - # that's not within this range. This is especially useful in closed federations, - # in which server admins can make sure every federating server applies the same - # rules. + # Retention policy limits. 
If set, and the state of a room contains a + # 'm.room.retention' event in its state which contains a 'min_lifetime' or a + # 'max_lifetime' that's out of these bounds, Synapse will cap the room's policy + # to these limits when running purge jobs. # #allowed_lifetime_min: 1d #allowed_lifetime_max: 1y From f02cc3ea3a636369009ac6b0707e5e53a314e9c8 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Mon, 24 Aug 2020 10:35:24 +0100 Subject: [PATCH 08/10] Update tests/rest/client/test_retention.py Co-authored-by: Patrick Cloke --- tests/rest/client/test_retention.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/tests/rest/client/test_retention.py b/tests/rest/client/test_retention.py index ccc96f61b7f7..a2c7852f95ce 100644 --- a/tests/rest/client/test_retention.py +++ b/tests/rest/client/test_retention.py @@ -167,10 +167,8 @@ def _test_retention_event_purged(self, room_id: str, increment: float): The main reason for sending a second event is because currently Synapse won't purge the latest message in a room because it would otherwise result in a lack of - forward extremities for this room. It's also a good thing to have (and with the - current ordering and checks) because if we eventually fix that, it acts as a - check that the purge jobs aren't too greedy and purge messages they shouldn't - purge. + forward extremities for this room. It's also a good thing to ensure the purge jobs + aren't too greedy and purge messages they shouldn't. Args: room_id: The ID of the room to test retention in. From b6e3b7ddf45d2c6aaa1531bed567b95a34bd23c0 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Mon, 24 Aug 2020 10:35:35 +0100 Subject: [PATCH 09/10] Update tests/rest/client/test_retention.py Co-authored-by: Patrick Cloke --- tests/rest/client/test_retention.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/tests/rest/client/test_retention.py b/tests/rest/client/test_retention.py index a2c7852f95ce..b6d25a48f755 100644 --- a/tests/rest/client/test_retention.py +++ b/tests/rest/client/test_retention.py @@ -163,8 +163,6 @@ def _test_retention_event_purged(self, room_id: str, increment: float): 5. Check that event 1 has been purged 6. Check that event 2 has not been purged 7. Check that state events that were sent before event 1 aren't purged. - - The main reason for sending a second event is because currently Synapse won't purge the latest message in a room because it would otherwise result in a lack of forward extremities for this room. It's also a good thing to ensure the purge jobs From 16814d6f876a764186e3a93e8c65e1043475eff6 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Mon, 24 Aug 2020 11:33:13 +0100 Subject: [PATCH 10/10] Add more details to the config doc + fix typo in comment --- docs/sample_config.yaml | 10 ++++++---- synapse/config/server.py | 10 ++++++---- synapse/handlers/pagination.py | 2 +- 3 files changed, 13 insertions(+), 9 deletions(-) diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index 8159f1b8ea4c..711947256003 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -410,14 +410,16 @@ retention: # If any purge job is configured, it is strongly recommended to have at least # a single job with neither 'shortest_max_lifetime' nor 'longest_max_lifetime' # set, or one job without 'shortest_max_lifetime' and one job without - # 'longest_max_lifetime' set. + # 'longest_max_lifetime' set. 
Otherwise some rooms might be ignored, even if + # 'allowed_lifetime_min' and 'allowed_lifetime_max' are set, because capping a + # room's policy to these values is done after the policies are retrieved from + # Synapse's database (which is done using the range specified in a purge job's + # configuration). # #purge_jobs: - # - shortest_max_lifetime: 1d - # longest_max_lifetime: 3d + # - longest_max_lifetime: 3d # interval: 12h # - shortest_max_lifetime: 3d - # longest_max_lifetime: 1y # interval: 1d # Inhibits the /requestToken endpoints from returning an error that might leak diff --git a/synapse/config/server.py b/synapse/config/server.py index 77ab6de7d3a2..e66118c58954 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -981,14 +981,16 @@ def generate_config_section( # If any purge job is configured, it is strongly recommended to have at least # a single job with neither 'shortest_max_lifetime' nor 'longest_max_lifetime' # set, or one job without 'shortest_max_lifetime' and one job without - # 'longest_max_lifetime' set. + # 'longest_max_lifetime' set. Otherwise some rooms might be ignored, even if + # 'allowed_lifetime_min' and 'allowed_lifetime_max' are set, because capping a + # room's policy to these values is done after the policies are retrieved from + # Synapse's database (which is done using the range specified in a purge job's + # configuration). # #purge_jobs: - # - shortest_max_lifetime: 1d - # longest_max_lifetime: 3d + # - longest_max_lifetime: 3d # interval: 12h # - shortest_max_lifetime: 3d - # longest_max_lifetime: 1y # interval: 1d # Inhibits the /requestToken endpoints from returning an error that might leak diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py index 06e08ea83dbf..ac3418d69d9f 100644 --- a/synapse/handlers/pagination.py +++ b/synapse/handlers/pagination.py @@ -114,7 +114,7 @@ async def purge_history_for_rooms_in_range(self, min_ms, max_ms): the range to handle (inclusive). If None, it means that the range has no upper limit. """ - # We want the storage layer to to include rooms with no retention policy in its + # We want the storage layer to include rooms with no retention policy in its # return value only if a default retention policy is defined in the server's # configuration and that policy's 'max_lifetime' is either lower (or equal) than # max_ms or higher than min_ms (or both).
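
A note on the capping logic introduced in PATCH 01/10: the two-step clamping of a room's
max_lifetime can be summarised by the standalone sketch below. It is illustrative only —
the function name, argument names and example durations are not part of the patches — but
it mirrors the order of operations used in purge_history_for_rooms_in_range: fall back to
the default policy, raise the value to the allowed minimum, then lower it to the allowed
maximum.

    ONE_HOUR_MS = 60 * 60 * 1000
    ONE_DAY_MS = 24 * ONE_HOUR_MS

    def cap_max_lifetime(
        policy_max_lifetime,        # "max_lifetime" from the room's m.room.retention event, or None
        default_max_lifetime,       # retention.default_policy.max_lifetime from the homeserver config
        allowed_lifetime_min=None,  # retention.allowed_lifetime_min, or None if unset
        allowed_lifetime_max=None,  # retention.allowed_lifetime_max, or None if unset
    ):
        """Compute the effective max_lifetime (in ms) used when purging a room's history."""
        # A room without a retention policy falls back to the server's default policy.
        max_lifetime = policy_max_lifetime or default_max_lifetime

        # Step 1: raise the value to the allowed minimum, if one is configured.
        if allowed_lifetime_min is not None:
            max_lifetime = max(allowed_lifetime_min, max_lifetime)

        # Step 2: lower the result to the allowed maximum, if one is configured.
        if allowed_lifetime_max is not None:
            max_lifetime = min(max_lifetime, allowed_lifetime_max)

        return max_lifetime

    # A room policy of 4 days is capped down to a 3-day allowed maximum...
    assert cap_max_lifetime(4 * ONE_DAY_MS, ONE_DAY_MS, ONE_DAY_MS, 3 * ONE_DAY_MS) == 3 * ONE_DAY_MS
    # ...and a policy of 1 hour is raised up to a 1-day allowed minimum.
    assert cap_max_lifetime(ONE_HOUR_MS, ONE_DAY_MS, ONE_DAY_MS, 3 * ONE_DAY_MS) == ONE_DAY_MS

The capping is deliberately applied at purge time rather than at event validation time
(PATCH 02/10 removes the validation), so a room whose policy was set by a remote server
is still honoured locally only within the configured bounds.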
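
The configuration warning added in PATCH 04/10 and expanded in PATCH 10/10 can likewise
be illustrated with a small model. The helper below is not Synapse code — room selection
is really done by the storage layer, and the boundary comparisons here are simplified —
it only shows why purge job ranges that don't cover every possible max_lifetime leave
some rooms unpurged: rooms are selected using their uncapped policy values, and the
allowed_lifetime_min/allowed_lifetime_max capping only happens afterwards.

    ONE_DAY_MS = 24 * 60 * 60 * 1000

    def job_selects(job, policy_max_lifetime):
        """Simplified model of whether a purge job's range covers a room's (uncapped) policy."""
        shortest = job.get("shortest_max_lifetime")
        longest = job.get("longest_max_lifetime")
        if shortest is not None and policy_max_lifetime < shortest:
            return False
        if longest is not None and policy_max_lifetime > longest:
            return False
        return True

    # A single bounded job leaves gaps: a room whose state sets max_lifetime to 4 days is
    # never selected, even though allowed_lifetime_max could cap it to 3 days at purge time.
    bounded_only = [{"shortest_max_lifetime": ONE_DAY_MS, "longest_max_lifetime": 3 * ONE_DAY_MS}]
    assert not any(job_selects(job, 4 * ONE_DAY_MS) for job in bounded_only)

    # The recommended setup: one job with no lower bound and one with no upper bound,
    # so every possible max_lifetime value is handled by some job.
    recommended = [{"longest_max_lifetime": 3 * ONE_DAY_MS}, {"shortest_max_lifetime": 3 * ONE_DAY_MS}]
    assert all(
        any(job_selects(job, lifetime) for job in recommended)
        for lifetime in (ONE_DAY_MS, 3 * ONE_DAY_MS, 365 * ONE_DAY_MS)
    )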